mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-11-09 16:00:22 +00:00
rebase: update kubernetes in api folder
updating the kubernetes to 1.31.0 in the api folder. Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
parent
2c0e65b828
commit
63c4c05b35
20
api/go.mod
20
api/go.mod
@ -6,26 +6,28 @@ require (
|
|||||||
github.com/ghodss/yaml v1.0.0
|
github.com/ghodss/yaml v1.0.0
|
||||||
github.com/openshift/api v0.0.0-20240115183315-0793e918179d
|
github.com/openshift/api v0.0.0-20240115183315-0793e918179d
|
||||||
github.com/stretchr/testify v1.9.0
|
github.com/stretchr/testify v1.9.0
|
||||||
k8s.io/api v0.30.3
|
k8s.io/api v0.31.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/go-logr/logr v1.4.1 // indirect
|
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||||
|
github.com/go-logr/logr v1.4.2 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/google/gofuzz v1.2.0 // indirect
|
github.com/google/gofuzz v1.2.0 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
golang.org/x/net v0.23.0 // indirect
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
golang.org/x/text v0.14.0 // indirect
|
golang.org/x/net v0.26.0 // indirect
|
||||||
|
golang.org/x/text v0.16.0 // indirect
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
k8s.io/apimachinery v0.30.3 // indirect
|
k8s.io/apimachinery v0.31.0 // indirect
|
||||||
k8s.io/klog/v2 v2.120.1 // indirect
|
k8s.io/klog/v2 v2.130.1 // indirect
|
||||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
|
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
|
||||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||||
)
|
)
|
||||||
|
46
api/go.sum
46
api/go.sum
@ -1,10 +1,13 @@
|
|||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||||
|
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
@ -28,16 +31,19 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
|||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
github.com/openshift/api v0.0.0-20240115183315-0793e918179d h1:gtwDqGPf5QmsV8jvOUoDNbtyeby9QeLdsybNQ8mGqHQ=
|
github.com/openshift/api v0.0.0-20240115183315-0793e918179d h1:gtwDqGPf5QmsV8jvOUoDNbtyeby9QeLdsybNQ8mGqHQ=
|
||||||
github.com/openshift/api v0.0.0-20240115183315-0793e918179d/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
|
github.com/openshift/api v0.0.0-20240115183315-0793e918179d/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||||
|
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
|
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||||
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
@ -49,8 +55,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -59,8 +65,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
@ -79,17 +85,17 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
|
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||||
k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
|
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||||
k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
|
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||||
k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||||
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||||
|
12
api/vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
12
api/vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# Binaries for programs and plugins
|
||||||
|
*.exe
|
||||||
|
*.exe~
|
||||||
|
*.dll
|
||||||
|
*.so
|
||||||
|
*.dylib
|
||||||
|
|
||||||
|
# Test binary, build with `go test -c`
|
||||||
|
*.test
|
||||||
|
|
||||||
|
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||||
|
*.out
|
104
api/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
104
api/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
|
||||||
|
|
||||||
|
linters-settings:
|
||||||
|
depguard:
|
||||||
|
rules:
|
||||||
|
prevent_unmaintained_packages:
|
||||||
|
list-mode: strict
|
||||||
|
files:
|
||||||
|
- $all
|
||||||
|
- "!$test"
|
||||||
|
allow:
|
||||||
|
- $gostd
|
||||||
|
- github.com/x448/float16
|
||||||
|
deny:
|
||||||
|
- pkg: io/ioutil
|
||||||
|
desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
|
||||||
|
dupl:
|
||||||
|
threshold: 100
|
||||||
|
funlen:
|
||||||
|
lines: 100
|
||||||
|
statements: 50
|
||||||
|
goconst:
|
||||||
|
ignore-tests: true
|
||||||
|
min-len: 2
|
||||||
|
min-occurrences: 3
|
||||||
|
gocritic:
|
||||||
|
enabled-tags:
|
||||||
|
- diagnostic
|
||||||
|
- experimental
|
||||||
|
- opinionated
|
||||||
|
- performance
|
||||||
|
- style
|
||||||
|
disabled-checks:
|
||||||
|
- commentedOutCode
|
||||||
|
- dupImport # https://github.com/go-critic/go-critic/issues/845
|
||||||
|
- ifElseChain
|
||||||
|
- octalLiteral
|
||||||
|
- paramTypeCombine
|
||||||
|
- whyNoLint
|
||||||
|
gofmt:
|
||||||
|
simplify: false
|
||||||
|
goimports:
|
||||||
|
local-prefixes: github.com/fxamacker/cbor
|
||||||
|
golint:
|
||||||
|
min-confidence: 0
|
||||||
|
govet:
|
||||||
|
check-shadowing: true
|
||||||
|
lll:
|
||||||
|
line-length: 140
|
||||||
|
maligned:
|
||||||
|
suggest-new: true
|
||||||
|
misspell:
|
||||||
|
locale: US
|
||||||
|
staticcheck:
|
||||||
|
checks: ["all"]
|
||||||
|
|
||||||
|
linters:
|
||||||
|
disable-all: true
|
||||||
|
enable:
|
||||||
|
- asciicheck
|
||||||
|
- bidichk
|
||||||
|
- depguard
|
||||||
|
- errcheck
|
||||||
|
- exportloopref
|
||||||
|
- goconst
|
||||||
|
- gocritic
|
||||||
|
- gocyclo
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
|
- goprintffuncname
|
||||||
|
- gosec
|
||||||
|
- gosimple
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
- misspell
|
||||||
|
- nilerr
|
||||||
|
- revive
|
||||||
|
- staticcheck
|
||||||
|
- stylecheck
|
||||||
|
- typecheck
|
||||||
|
- unconvert
|
||||||
|
- unused
|
||||||
|
|
||||||
|
issues:
|
||||||
|
# max-issues-per-linter default is 50. Set to 0 to disable limit.
|
||||||
|
max-issues-per-linter: 0
|
||||||
|
# max-same-issues default is 3. Set to 0 to disable limit.
|
||||||
|
max-same-issues: 0
|
||||||
|
|
||||||
|
exclude-rules:
|
||||||
|
- path: decode.go
|
||||||
|
text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string `, ` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: valid.go
|
||||||
|
text: "string ` for type ` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: valid.go
|
||||||
|
text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
|
133
api/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
133
api/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
We as members, contributors, and leaders pledge to make participation in our
|
||||||
|
community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
|
identity and expression, level of experience, education, socio-economic status,
|
||||||
|
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||||
|
identity and orientation.
|
||||||
|
|
||||||
|
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||||
|
diverse, inclusive, and healthy community.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to a positive environment for our
|
||||||
|
community include:
|
||||||
|
|
||||||
|
* Demonstrating empathy and kindness toward other people
|
||||||
|
* Being respectful of differing opinions, viewpoints, and experiences
|
||||||
|
* Giving and gracefully accepting constructive feedback
|
||||||
|
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||||
|
and learning from the experience
|
||||||
|
* Focusing on what is best not just for us as individuals, but for the overall
|
||||||
|
community
|
||||||
|
|
||||||
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||||
|
any kind
|
||||||
|
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or email address,
|
||||||
|
without their explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
|
Community leaders are responsible for clarifying and enforcing our standards of
|
||||||
|
acceptable behavior and will take appropriate and fair corrective action in
|
||||||
|
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||||
|
or harmful.
|
||||||
|
|
||||||
|
Community leaders have the right and responsibility to remove, edit, or reject
|
||||||
|
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||||
|
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||||
|
decisions when appropriate.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies within all community spaces, and also applies when
|
||||||
|
an individual is officially representing the community in public spaces.
|
||||||
|
Examples of representing our community include using an official e-mail address,
|
||||||
|
posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported to the community leaders responsible for enforcement at
|
||||||
|
faye.github@gmail.com.
|
||||||
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
|
All community leaders are obligated to respect the privacy and security of the
|
||||||
|
reporter of any incident.
|
||||||
|
|
||||||
|
## Enforcement Guidelines
|
||||||
|
|
||||||
|
Community leaders will follow these Community Impact Guidelines in determining
|
||||||
|
the consequences for any action they deem in violation of this Code of Conduct:
|
||||||
|
|
||||||
|
### 1. Correction
|
||||||
|
|
||||||
|
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||||
|
unprofessional or unwelcome in the community.
|
||||||
|
|
||||||
|
**Consequence**: A private, written warning from community leaders, providing
|
||||||
|
clarity around the nature of the violation and an explanation of why the
|
||||||
|
behavior was inappropriate. A public apology may be requested.
|
||||||
|
|
||||||
|
### 2. Warning
|
||||||
|
|
||||||
|
**Community Impact**: A violation through a single incident or series of
|
||||||
|
actions.
|
||||||
|
|
||||||
|
**Consequence**: A warning with consequences for continued behavior. No
|
||||||
|
interaction with the people involved, including unsolicited interaction with
|
||||||
|
those enforcing the Code of Conduct, for a specified period of time. This
|
||||||
|
includes avoiding interactions in community spaces as well as external channels
|
||||||
|
like social media. Violating these terms may lead to a temporary or permanent
|
||||||
|
ban.
|
||||||
|
|
||||||
|
### 3. Temporary Ban
|
||||||
|
|
||||||
|
**Community Impact**: A serious violation of community standards, including
|
||||||
|
sustained inappropriate behavior.
|
||||||
|
|
||||||
|
**Consequence**: A temporary ban from any sort of interaction or public
|
||||||
|
communication with the community for a specified period of time. No public or
|
||||||
|
private interaction with the people involved, including unsolicited interaction
|
||||||
|
with those enforcing the Code of Conduct, is allowed during this period.
|
||||||
|
Violating these terms may lead to a permanent ban.
|
||||||
|
|
||||||
|
### 4. Permanent Ban
|
||||||
|
|
||||||
|
**Community Impact**: Demonstrating a pattern of violation of community
|
||||||
|
standards, including sustained inappropriate behavior, harassment of an
|
||||||
|
individual, or aggression toward or disparagement of classes of individuals.
|
||||||
|
|
||||||
|
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||||
|
community.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||||
|
version 2.1, available at
|
||||||
|
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||||
|
|
||||||
|
Community Impact Guidelines were inspired by
|
||||||
|
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||||
|
|
||||||
|
For answers to common questions about this code of conduct, see the FAQ at
|
||||||
|
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||||
|
[https://www.contributor-covenant.org/translations][translations].
|
||||||
|
|
||||||
|
[homepage]: https://www.contributor-covenant.org
|
||||||
|
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||||
|
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||||
|
[FAQ]: https://www.contributor-covenant.org/faq
|
||||||
|
[translations]: https://www.contributor-covenant.org/translations
|
41
api/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
41
api/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
# How to contribute
|
||||||
|
|
||||||
|
You can contribute by using the library, opening issues, or opening pull requests.
|
||||||
|
|
||||||
|
## Bug reports and security vulnerabilities
|
||||||
|
|
||||||
|
Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
|
||||||
|
|
||||||
|
To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
|
||||||
|
|
||||||
|
Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
|
||||||
|
|
||||||
|
## Pull requests
|
||||||
|
|
||||||
|
Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
|
||||||
|
|
||||||
|
Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
|
||||||
|
|
||||||
|
See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
|
||||||
|
|
||||||
|
Pull requests have a greater chance of being approved if:
|
||||||
|
- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
|
||||||
|
- it has > 97% code coverage.
|
||||||
|
|
||||||
|
## Describe your issue
|
||||||
|
|
||||||
|
Clearly describe the issue:
|
||||||
|
* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
|
||||||
|
* If you propose a change or addition, try to give an example how the improved code could look like or how to use it.
|
||||||
|
* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
|
||||||
|
|
||||||
|
## Please don't
|
||||||
|
|
||||||
|
Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
|
||||||
|
|
||||||
|
Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
|
||||||
|
- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
|
21
api/vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
21
api/vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019-present Faye Amacker
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
691
api/vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
691
api/vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
@ -0,0 +1,691 @@
|
|||||||
|
# CBOR Codec in Go
|
||||||
|
|
||||||
|
<!-- [![](https://github.com/fxamacker/images/raw/master/cbor/v2.5.0/fxamacker_cbor_banner.png)](#cbor-library-in-go) -->
|
||||||
|
|
||||||
|
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
|
||||||
|
|
||||||
|
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
|
||||||
|
|
||||||
|
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||||
|
|
||||||
|
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
|
||||||
|
|
||||||
|
## fxamacker/cbor
|
||||||
|
|
||||||
|
[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
|
||||||
|
[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
|
||||||
|
[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
|
||||||
|
[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
|
||||||
|
|
||||||
|
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||||
|
|
||||||
|
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
|
||||||
|
|
||||||
|
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
|
||||||
|
|
||||||
|
<details><summary>Highlights</summary><p/>
|
||||||
|
|
||||||
|
__🚀 Speed__
|
||||||
|
|
||||||
|
Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
|
||||||
|
|
||||||
|
__🔒 Security__
|
||||||
|
|
||||||
|
Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||||
|
|
||||||
|
Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
|
||||||
|
|
||||||
|
__🗜️ Data Size__
|
||||||
|
|
||||||
|
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||||
|
|
||||||
|
__:jigsaw: Usability__
|
||||||
|
|
||||||
|
API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
|
||||||
|
|
||||||
|
Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
|
||||||
|
|
||||||
|
__📆 Extensibility__
|
||||||
|
|
||||||
|
Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Secure Decoding with Configurable Settings
|
||||||
|
|
||||||
|
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
|
||||||
|
|
||||||
|
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||||
|
|
||||||
|
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||||
|
// while decoding 181 bytes.
|
||||||
|
package main
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/gob"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Example data is from https://github.com/golang/go/issues/24446
|
||||||
|
// (shortened to 181 bytes).
|
||||||
|
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||||
|
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||||
|
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||||
|
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||||
|
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||||
|
"303030303030303030303030303030303030303030303030303030303030" +
|
||||||
|
"30"
|
||||||
|
|
||||||
|
type X struct {
|
||||||
|
J *X
|
||||||
|
K map[string]int
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
raw, _ := hex.DecodeString(data)
|
||||||
|
decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||||
|
|
||||||
|
var x X
|
||||||
|
decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||||
|
fmt.Println("Decoding finished.")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
|
||||||
|
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
|
||||||
|
|
||||||
|
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||||
|
| :---- | ------------: | -----: | -----: |
|
||||||
|
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
|
||||||
|
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
|
||||||
|
|
||||||
|
<details><summary>Benchmark details</summary><p/>
|
||||||
|
|
||||||
|
Latest comparison used:
|
||||||
|
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||||
|
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
|
||||||
|
- go test -bench=. -benchmem -count=20
|
||||||
|
|
||||||
|
#### Prior comparisons
|
||||||
|
|
||||||
|
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||||
|
| :---- | ------------: | -----: | -----: |
|
||||||
|
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||||
|
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||||
|
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||||
|
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||||
|
|
||||||
|
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||||
|
- go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||||
|
- go test -bench=. -benchmem -count=20
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Smaller Encodings with Struct Tags
|
||||||
|
|
||||||
|
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||||
|
|
||||||
|
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||||
|
|
||||||
|
https://go.dev/play/p/YxwvfPdFQG2
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Example encoding nested struct (with omitempty tag)
|
||||||
|
// - encoding/json: 18 byte JSON
|
||||||
|
// - fxamacker/cbor: 1 byte CBOR
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GrandChild struct {
|
||||||
|
Quux int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Child struct {
|
||||||
|
Baz int `json:",omitempty"`
|
||||||
|
Qux GrandChild `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Parent struct {
|
||||||
|
Foo Child `json:",omitempty"`
|
||||||
|
Bar int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func cb() {
|
||||||
|
results, _ := cbor.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||||
|
fmt.Println("DN: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func js() {
|
||||||
|
results, _ := json.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text := string(results) // JSON
|
||||||
|
fmt.Println("JSON: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cb()
|
||||||
|
fmt.Println("-------------")
|
||||||
|
js()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Output (DN is Diagnostic Notation):
|
||||||
|
```
|
||||||
|
hex(CBOR): a0
|
||||||
|
DN: {}
|
||||||
|
-------------
|
||||||
|
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||||
|
JSON: {"Foo":{"Qux":{}}}
|
||||||
|
```
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
Example using different struct tags together:
|
||||||
|
|
||||||
|
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
|
||||||
|
|
||||||
|
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
|
||||||
|
|
||||||
|
### Key Points
|
||||||
|
|
||||||
|
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
|
||||||
|
|
||||||
|
- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
|
||||||
|
- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
|
||||||
|
|
||||||
|
Configurable limits and options can be used to balance trade-offs.
|
||||||
|
|
||||||
|
- Encoding and decoding modes are created from options (settings).
|
||||||
|
- Modes can be created at startup and reused.
|
||||||
|
- Modes are safe for concurrent use.
|
||||||
|
|
||||||
|
### Default Mode
|
||||||
|
|
||||||
|
Package level functions only use this library's default settings.
|
||||||
|
They provide the "default mode" of encoding and decoding.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
|
||||||
|
b, err = cbor.Marshal(v) // encode v to []byte b
|
||||||
|
err = cbor.Unmarshal(b, &v) // decode []byte b to v
|
||||||
|
decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
|
||||||
|
err = decoder.Decode(&v) // decode a CBOR data item to v
|
||||||
|
|
||||||
|
// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
|
||||||
|
err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
|
||||||
|
|
||||||
|
// v2.5.0 added new functions that return remaining bytes.
|
||||||
|
|
||||||
|
// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
|
||||||
|
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||||
|
|
||||||
|
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
|
||||||
|
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
|
||||||
|
|
||||||
|
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
|
||||||
|
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
|
||||||
|
```
|
||||||
|
|
||||||
|
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||||
|
|
||||||
|
- Different CBOR libraries may use different default settings.
|
||||||
|
- CBOR-based formats or protocols usually require specific settings.
|
||||||
|
|
||||||
|
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||||
|
|
||||||
|
### Presets
|
||||||
|
|
||||||
|
Presets can be used as-is or as a starting point for custom settings.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// EncOptions is a struct of encoder settings.
|
||||||
|
func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
|
||||||
|
func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
|
||||||
|
func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
|
||||||
|
func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
|
||||||
|
```
|
||||||
|
|
||||||
|
Presets are used to create custom modes.
|
||||||
|
|
||||||
|
### Custom Modes
|
||||||
|
|
||||||
|
Modes are created from settings. Once created, modes have immutable settings.
|
||||||
|
|
||||||
|
💡 Create the mode at startup and reuse it. It is safe for concurrent use.
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Create encoding mode.
|
||||||
|
opts := cbor.CoreDetEncOptions() // use preset options as a starting point
|
||||||
|
opts.Time = cbor.TimeUnix // change any settings if needed
|
||||||
|
em, err := opts.EncMode() // create an immutable encoding mode
|
||||||
|
|
||||||
|
// Reuse the encoding mode. It is safe for concurrent use.
|
||||||
|
|
||||||
|
// API matches encoding/json.
|
||||||
|
b, err := em.Marshal(v) // encode v to []byte b
|
||||||
|
encoder := em.NewEncoder(w) // create encoder with io.Writer w
|
||||||
|
err := encoder.Encode(v) // encode v to io.Writer w
|
||||||
|
```
|
||||||
|
|
||||||
|
Default mode and custom modes automatically apply struct tags.
|
||||||
|
|
||||||
|
### User Specified Buffer for Encoding (v2.7.0)
|
||||||
|
|
||||||
|
`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool.
|
||||||
|
|
||||||
|
```Go
|
||||||
|
em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
|
||||||
|
```
|
||||||
|
|
||||||
|
### Struct Tags
|
||||||
|
|
||||||
|
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||||
|
|
||||||
|
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||||
|
|
||||||
|
https://go.dev/play/p/YxwvfPdFQG2
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Example encoding nested struct (with omitempty tag)
|
||||||
|
// - encoding/json: 18 byte JSON
|
||||||
|
// - fxamacker/cbor: 1 byte CBOR
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GrandChild struct {
|
||||||
|
Quux int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Child struct {
|
||||||
|
Baz int `json:",omitempty"`
|
||||||
|
Qux GrandChild `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Parent struct {
|
||||||
|
Foo Child `json:",omitempty"`
|
||||||
|
Bar int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func cb() {
|
||||||
|
results, _ := cbor.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||||
|
fmt.Println("DN: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func js() {
|
||||||
|
results, _ := json.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text := string(results) // JSON
|
||||||
|
fmt.Println("JSON: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cb()
|
||||||
|
fmt.Println("-------------")
|
||||||
|
js()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Output (DN is Diagnostic Notation):
|
||||||
|
```
|
||||||
|
hex(CBOR): a0
|
||||||
|
DN: {}
|
||||||
|
-------------
|
||||||
|
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||||
|
JSON: {"Foo":{"Qux":{}}}
|
||||||
|
```
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details><summary>Example using several struct tags</summary><p/>
|
||||||
|
|
||||||
|
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||||
|
|
||||||
|
### CBOR Tags
|
||||||
|
|
||||||
|
CBOR tags are specified in a `TagSet`.
|
||||||
|
|
||||||
|
Custom modes can be created with a `TagSet` to handle CBOR tags.
|
||||||
|
|
||||||
|
```go
|
||||||
|
em, err := opts.EncMode() // no CBOR tags
|
||||||
|
em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
|
||||||
|
em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
|
||||||
|
```
|
||||||
|
|
||||||
|
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
|
||||||
|
|
||||||
|
<details><summary>Example using TagSet and TagOptions</summary><p/>
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Use signedCWT struct defined in "Decoding CWT" example.
|
||||||
|
|
||||||
|
// Create TagSet (safe for concurrency).
|
||||||
|
tags := cbor.NewTagSet()
|
||||||
|
// Register tag COSE_Sign1 18 with signedCWT type.
|
||||||
|
tags.Add(
|
||||||
|
cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
|
||||||
|
reflect.TypeOf(signedCWT{}),
|
||||||
|
18)
|
||||||
|
|
||||||
|
// Create DecMode with immutable tags.
|
||||||
|
dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
|
||||||
|
|
||||||
|
// Unmarshal to signedCWT with tag support.
|
||||||
|
var v signedCWT
|
||||||
|
if err := dm.Unmarshal(data, &v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create EncMode with immutable tags.
|
||||||
|
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
|
||||||
|
|
||||||
|
// Marshal signedCWT with tag number.
|
||||||
|
if data, err := cbor.Marshal(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Functions and Interfaces
|
||||||
|
|
||||||
|
<details><summary>Functions and interfaces at a glance</summary><p/>
|
||||||
|
|
||||||
|
Common functions with same API as `encoding/json`:
|
||||||
|
- `Marshal`, `Unmarshal`
|
||||||
|
- `NewEncoder`, `(*Encoder).Encode`
|
||||||
|
- `NewDecoder`, `(*Decoder).Decode`
|
||||||
|
|
||||||
|
NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
|
||||||
|
because RFC 8949 treats CBOR data item with remaining bytes as malformed.
|
||||||
|
- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes.
|
||||||
|
|
||||||
|
Other useful functions:
|
||||||
|
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
|
||||||
|
- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
|
||||||
|
- `Wellformed` returns true if the CBOR data item is well-formed.
|
||||||
|
|
||||||
|
Interfaces identical or comparable to Go `encoding` packages include:
|
||||||
|
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
|
||||||
|
|
||||||
|
The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Security Tips
|
||||||
|
|
||||||
|
🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
|
||||||
|
|
||||||
|
Default limits may need to be increased for systems handling very large data (e.g. blockchains).
|
||||||
|
|
||||||
|
`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||||
|
|
||||||
|
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||||
|
|
||||||
|
### Prior Release
|
||||||
|
|
||||||
|
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||||
|
|
||||||
|
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||||
|
|
||||||
|
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||||
|
|
||||||
|
See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
|
||||||
|
|
||||||
|
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||||
|
|
||||||
|
<!--
|
||||||
|
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||||
|
|
||||||
|
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||||
|
|
||||||
|
Comparison of v2.4.0 vs v2.5.0-beta2 provided by @x448 (edited to fit width).
|
||||||
|
|
||||||
|
PR [#382](https://github.com/fxamacker/cbor/pull/382) returns buffer to pool in `Encode()`. It adds a bit of overhead to `Encode()` but `NewEncoder().Encode()` is a lot faster and uses less memory as shown here:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ benchstat bench-v2.4.0.log bench-f9e6291.log
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
pkg: github.com/fxamacker/cbor/v2
|
||||||
|
cpu: 12th Gen Intel(R) Core(TM) i7-12700H
|
||||||
|
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||||
|
│ sec/op │ sec/op vs base │
|
||||||
|
NewEncoderEncode/Go_bool_to_CBOR_bool-20 236.70n ± 2% 58.04n ± 1% -75.48% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 238.00n ± 2% 63.93n ± 1% -73.14% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 238.65n ± 2% 64.88n ± 1% -72.81% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_float64_to_CBOR_float-20 242.00n ± 2% 63.00n ± 1% -73.97% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 245.60n ± 1% 68.55n ± 1% -72.09% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_string_to_CBOR_text-20 243.20n ± 3% 68.39n ± 1% -71.88% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]int_to_CBOR_array-20 563.0n ± 2% 378.3n ± 0% -32.81% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 2.043µ ± 2% 1.906µ ± 2% -6.75% (p=0.000 n=10)
|
||||||
|
geomean 349.7n 122.7n -64.92%
|
||||||
|
|
||||||
|
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||||
|
│ B/op │ B/op vs base │
|
||||||
|
NewEncoderEncode/Go_bool_to_CBOR_bool-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_float64_to_CBOR_float-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_string_to_CBOR_text-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]int_to_CBOR_array-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 544.0 ± 0% 416.0 ± 0% -23.53% (p=0.000 n=10)
|
||||||
|
geomean 153.4 ? ¹ ²
|
||||||
|
¹ summaries must be >0 to compute geomean
|
||||||
|
² ratios must be >0 to compute geomean
|
||||||
|
|
||||||
|
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||||
|
│ allocs/op │ allocs/op vs base │
|
||||||
|
NewEncoderEncode/Go_bool_to_CBOR_bool-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_float64_to_CBOR_float-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_string_to_CBOR_text-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]int_to_CBOR_array-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 28.00 ± 0% 26.00 ± 0% -7.14% (p=0.000 n=10)
|
||||||
|
geomean 2.782 ? ¹ ²
|
||||||
|
¹ summaries must be >0 to compute geomean
|
||||||
|
² ratios must be >0 to compute geomean
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
-->
|
||||||
|
|
||||||
|
## Who uses fxamacker/cbor
|
||||||
|
|
||||||
|
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||||
|
|
||||||
|
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||||
|
|
||||||
|
## Standards
|
||||||
|
|
||||||
|
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||||
|
|
||||||
|
Notable CBOR features include:
|
||||||
|
|
||||||
|
| CBOR Feature | Description |
|
||||||
|
| :--- | :--- |
|
||||||
|
| CBOR tags | API supports built-in and user-defined tags. |
|
||||||
|
| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
|
||||||
|
| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
|
||||||
|
| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
|
||||||
|
| Indefinite length data | Option to allow/forbid for encoding and decoding. |
|
||||||
|
| Well-formedness | Always checked and enforced. |
|
||||||
|
| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
|
||||||
|
| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
|
||||||
|
|
||||||
|
Known limitations are noted in the [Limitations section](#limitations).
|
||||||
|
|
||||||
|
Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
|
||||||
|
|
||||||
|
Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
|
||||||
|
|
||||||
|
After well-formedness is verified, basic validity errors are handled as follows:
|
||||||
|
|
||||||
|
* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
|
||||||
|
* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
|
||||||
|
|
||||||
|
When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
|
||||||
|
|
||||||
|
By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
|
||||||
|
|
||||||
|
__Click to expand topic:__
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Duplicate Map Keys</summary><p>
|
||||||
|
|
||||||
|
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
|
||||||
|
|
||||||
|
`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
|
||||||
|
|
||||||
|
`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
|
||||||
|
|
||||||
|
APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Tag Validity</summary><p>
|
||||||
|
|
||||||
|
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
|
||||||
|
|
||||||
|
* Inadmissible type for tag content
|
||||||
|
* Inadmissible value for tag content
|
||||||
|
|
||||||
|
Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
|
||||||
|
|
||||||
|
* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
|
||||||
|
* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
|
||||||
|
|
||||||
|
Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
|
||||||
|
|
||||||
|
For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
|
||||||
|
|
||||||
|
* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
|
||||||
|
* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
|
||||||
|
* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
|
||||||
|
|
||||||
|
## Fuzzing and Code Coverage
|
||||||
|
|
||||||
|
__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
|
||||||
|
|
||||||
|
__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
|
||||||
|
|
||||||
|
<hr>
|
||||||
|
|
||||||
|
## Versions and API Changes
|
||||||
|
This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
|
||||||
|
|
||||||
|
These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
|
||||||
|
`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
|
||||||
|
|
||||||
|
Exclusions from SemVer:
|
||||||
|
- Newly added API documented as "subject to change".
|
||||||
|
- Newly added API in the master branch that has never been tagged in non-beta release.
|
||||||
|
- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||||
|
|
||||||
|
This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
|
||||||
|
|
||||||
|
## Code of Conduct
|
||||||
|
|
||||||
|
This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
|
||||||
|
|
||||||
|
For more info, see [How to Contribute](CONTRIBUTING.md).
|
||||||
|
|
||||||
|
## Security Policy
|
||||||
|
|
||||||
|
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||||
|
|
||||||
|
For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
|
||||||
|
|
||||||
|
## Acknowledgements
|
||||||
|
|
||||||
|
Many thanks to all the contributors on this project!
|
||||||
|
|
||||||
|
I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.
|
||||||
|
|
||||||
|
I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.
|
||||||
|
|
||||||
|
Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.
|
||||||
|
|
||||||
|
This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs.
|
||||||
|
|
||||||
|
Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).
|
||||||
|
|
||||||
|
Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!
|
||||||
|
|
||||||
|
This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).
|
||||||
|
|
||||||
|
fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.
|
||||||
|
|
||||||
|
<hr>
|
7
api/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
7
api/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# Security Policy
|
||||||
|
|
||||||
|
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||||
|
|
||||||
|
If the security vulnerability is already known to the public, then you can open an issue as a bug report.
|
||||||
|
|
||||||
|
To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
|
63
api/vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
63
api/vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ByteString represents CBOR byte string (major type 2). ByteString can be used
// when using a Go []byte is not possible or convenient. For example, Go doesn't
// allow []byte as map key, so ByteString can be used to support data formats
// having CBOR map with byte string keys. ByteString can also be used to
// encode invalid UTF-8 string as CBOR byte string.
// See DecOption.MapKeyByteStringMode for more details.
type ByteString string

// Bytes returns bytes representing ByteString.
// The conversion allocates a fresh slice, so mutating the result does not
// affect bs.
func (bs ByteString) Bytes() []byte {
	return []byte(bs)
}

// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
func (bs ByteString) MarshalCBOR() ([]byte, error) {
	// Use a pooled scratch buffer for the head bytes; it is returned to the
	// pool when this function exits.
	e := getEncodeBuffer()
	defer putEncodeBuffer(e)

	// Encode length
	encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))

	// Encode data
	// The result is assembled into a freshly allocated slice because the
	// pooled buffer must not escape this function.
	buf := make([]byte, e.Len()+len(bs))
	n := copy(buf, e.Bytes())
	copy(buf[n:], bs)

	return buf, nil
}

// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
	if bs == nil {
		return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
	}

	// Decoding CBOR null and CBOR undefined to ByteString resets data.
	// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
	// 0xf6 is CBOR null, 0xf7 is CBOR undefined.
	if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
		*bs = ""
		return nil
	}

	d := decoder{data: data, dm: defaultDecMode}

	// Check if CBOR data type is byte string
	if typ := d.nextCBORType(); typ != cborTypeByteString {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
	}

	// The second return value is deliberately discarded here.
	b, _ := d.parseByteString()
	*bs = ByteString(b)
	return nil
}
|
363
api/vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
363
api/vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// encodeFuncs pairs a field's encoder with its "is empty" predicate so both
// can be cached and looked up together.
type encodeFuncs struct {
	ef  encodeFunc
	ief isEmptyFunc
}

// Per-type caches. Each maps a reflect.Type to precomputed metadata so that
// reflection work is done at most once per type.
var (
	decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
	encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
	encodeFuncCache         sync.Map // map[reflect.Type]encodeFuncs
	typeInfoCache           sync.Map // map[reflect.Type]*typeInfo
)

// specialType classifies Go types that need dedicated handling during
// encoding/decoding (interfaces, cbor.Tag, time.Time, Unmarshaler).
type specialType int

const (
	specialTypeNone specialType = iota
	specialTypeUnmarshalerIface
	specialTypeEmptyIface
	specialTypeIface
	specialTypeTag
	specialTypeTime
)

// typeInfo caches reflection-derived facts about a Go type: its kind, the
// type/kind after stripping pointer indirections, its special-type
// classification, and (for containers) the typeInfo of key/element types.
type typeInfo struct {
	elemTypeInfo *typeInfo    // element type info for array/slice/map types
	keyTypeInfo  *typeInfo    // key type info for map types
	typ          reflect.Type // original type
	kind         reflect.Kind // kind of typ
	nonPtrType   reflect.Type // typ with all pointer indirections removed
	nonPtrKind   reflect.Kind // kind of nonPtrType
	spclType     specialType  // special handling classification
}
|
||||||
|
|
||||||
|
// newTypeInfo computes typeInfo for t by stripping pointer indirections,
// classifying special types, and (for containers) resolving key/element
// type info recursively through the shared cache.
func newTypeInfo(t reflect.Type) *typeInfo {
	tInfo := typeInfo{typ: t, kind: t.Kind()}

	// Strip all levels of pointer indirection.
	for t.Kind() == reflect.Ptr {
		t = t.Elem()
	}

	k := t.Kind()

	tInfo.nonPtrType = t
	tInfo.nonPtrKind = k

	// Classify types that need dedicated encode/decode paths. Interface
	// checks come first; the Unmarshaler check uses the pointer type since
	// UnmarshalCBOR has a pointer receiver.
	if k == reflect.Interface {
		if t.NumMethod() == 0 {
			tInfo.spclType = specialTypeEmptyIface
		} else {
			tInfo.spclType = specialTypeIface
		}
	} else if t == typeTag {
		tInfo.spclType = specialTypeTag
	} else if t == typeTime {
		tInfo.spclType = specialTypeTime
	} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
		tInfo.spclType = specialTypeUnmarshalerIface
	}

	// Containers carry type info for their element (and key, for maps).
	switch k {
	case reflect.Array, reflect.Slice:
		tInfo.elemTypeInfo = getTypeInfo(t.Elem())
	case reflect.Map:
		tInfo.keyTypeInfo = getTypeInfo(t.Key())
		tInfo.elemTypeInfo = getTypeInfo(t.Elem())
	}

	return &tInfo
}
|
||||||
|
|
||||||
|
// decodingStructType caches per-struct decoding metadata: the resolved field
// list, a field-name -> index lookup, any error found while building the
// metadata, and whether the struct uses the "toarray" option.
type decodingStructType struct {
	fields             fields
	fieldIndicesByName map[string]int
	err                error
	toArray            bool
}
|
||||||
|
|
||||||
|
// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
// here's a very basic implementation of an aggregated error.
type multierror []error

// Error implements the error interface by joining the messages of all
// aggregated errors with ", ".
func (m multierror) Error() string {
	msgs := make([]string, len(m))
	for i, err := range m {
		msgs[i] = err.Error()
	}
	return strings.Join(msgs, ", ")
}
|
||||||
|
|
||||||
|
// getDecodingStructType returns cached decoding metadata for struct type t,
// computing and caching it on first use. Concurrent callers may compute the
// metadata redundantly; the cache keeps whichever entry is stored last.
// Metadata errors are recorded in the returned struct's err field rather
// than returned directly.
func getDecodingStructType(t reflect.Type) *decodingStructType {
	if v, _ := decodingStructTypeCache.Load(t); v != nil {
		return v.(*decodingStructType)
	}

	flds, structOptions := getFields(t)

	toArray := hasToArrayOption(structOptions)

	var errs []error
	for i := 0; i < len(flds); i++ {
		if flds[i].keyAsInt {
			nameAsInt, numErr := strconv.Atoi(flds[i].name)
			if numErr != nil {
				errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
				// Stop at the first unparsable integer key; the recorded
				// error is surfaced to callers via structType.err.
				break
			}
			flds[i].nameAsInt = int64(nameAsInt)
		}

		flds[i].typInfo = getTypeInfo(flds[i].typ)
	}

	// Build a field-name -> index lookup, flagging duplicate names as errors.
	fieldIndicesByName := make(map[string]int, len(flds))
	for i, fld := range flds {
		if _, ok := fieldIndicesByName[fld.name]; ok {
			errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
			continue
		}
		fieldIndicesByName[fld.name] = i
	}

	// Collapse accumulated errors: none -> nil, one -> that error itself,
	// several -> a multierror aggregate.
	var err error
	{
		var multi multierror
		for _, each := range errs {
			if each != nil {
				multi = append(multi, each)
			}
		}
		if len(multi) == 1 {
			err = multi[0]
		} else if len(multi) > 1 {
			err = multi
		}
	}

	structType := &decodingStructType{
		fields:             flds,
		fieldIndicesByName: fieldIndicesByName,
		err:                err,
		toArray:            toArray,
	}
	decodingStructTypeCache.Store(t, structType)
	return structType
}
|
||||||
|
|
||||||
|
// encodingStructType caches per-struct encoding metadata, including the field
// list in declaration order plus pre-sorted copies for the canonical sort modes.
type encodingStructType struct {
	fields             fields
	bytewiseFields     fields // fields sorted bytewise by encoded CBOR name
	lengthFirstFields  fields // fields sorted by name length, then bytewise
	omitEmptyFieldsIdx []int  // indices of fields carrying the omitempty option
	err                error
	toArray            bool
}

// getFields returns the pre-sorted field list matching em's sort mode.
// Unrecognized modes fall back to the bytewise (canonical) ordering.
func (st *encodingStructType) getFields(em *encMode) fields {
	switch em.sort {
	case SortNone, SortFastShuffle:
		return st.fields
	case SortLengthFirst:
		return st.lengthFirstFields
	default:
		return st.bytewiseFields
	}
}
|
||||||
|
|
||||||
|
type bytewiseFieldSorter struct {
|
||||||
|
fields fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *bytewiseFieldSorter) Len() int {
|
||||||
|
return len(x.fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *bytewiseFieldSorter) Swap(i, j int) {
|
||||||
|
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *bytewiseFieldSorter) Less(i, j int) bool {
|
||||||
|
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type lengthFirstFieldSorter struct {
|
||||||
|
fields fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *lengthFirstFieldSorter) Len() int {
|
||||||
|
return len(x.fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *lengthFirstFieldSorter) Swap(i, j int) {
|
||||||
|
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *lengthFirstFieldSorter) Less(i, j int) bool {
|
||||||
|
if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
|
||||||
|
return len(x.fields[i].cborName) < len(x.fields[j].cborName)
|
||||||
|
}
|
||||||
|
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// getEncodingStructType returns cached encoding metadata for struct type t,
// computing it on first use. The returned error (also recorded in the cached
// entry) reports unsupported field types or unparsable integer keys.
func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
	if v, _ := encodingStructTypeCache.Load(t); v != nil {
		structType := v.(*encodingStructType)
		return structType, structType.err
	}

	flds, structOptions := getFields(t)

	// Structs with the "toarray" option are encoded as CBOR arrays, so no
	// per-field name encoding or sorting is needed.
	if hasToArrayOption(structOptions) {
		return getEncodingStructToArrayType(t, flds)
	}

	var err error
	var hasKeyAsInt bool
	var hasKeyAsStr bool
	var omitEmptyIdx []int
	// Pooled scratch buffer, reused for each field's encoded name.
	e := getEncodeBuffer()
	for i := 0; i < len(flds); i++ {
		// Get field's encodeFunc
		flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
		if flds[i].ef == nil {
			err = &UnsupportedTypeError{t}
			break
		}

		// Encode field name
		if flds[i].keyAsInt {
			nameAsInt, numErr := strconv.Atoi(flds[i].name)
			if numErr != nil {
				err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
				break
			}
			flds[i].nameAsInt = int64(nameAsInt)
			if nameAsInt >= 0 {
				encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
			} else {
				// CBOR encodes a negative integer n as the unsigned value -n-1.
				n := nameAsInt*(-1) - 1
				encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
			}
			flds[i].cborName = make([]byte, e.Len())
			copy(flds[i].cborName, e.Bytes())
			e.Reset()

			hasKeyAsInt = true
		} else {
			encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
			flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
			n := copy(flds[i].cborName, e.Bytes())
			copy(flds[i].cborName[n:], flds[i].name)
			e.Reset()

			// If cborName contains a text string, then cborNameByteString contains a
			// string that has the byte string major type but is otherwise identical to
			// cborName.
			flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
			copy(flds[i].cborNameByteString, flds[i].cborName)
			// Reset encoded CBOR type to byte string, preserving the "additional
			// information" bits:
			flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
				getAdditionalInformation(flds[i].cborNameByteString[0])

			hasKeyAsStr = true
		}

		// Check if field can be omitted when empty
		if flds[i].omitEmpty {
			omitEmptyIdx = append(omitEmptyIdx, i)
		}
	}
	putEncodeBuffer(e)

	if err != nil {
		// Cache the failure too so repeated lookups don't redo the work.
		structType := &encodingStructType{err: err}
		encodingStructTypeCache.Store(t, structType)
		return structType, structType.err
	}

	// Sort fields by canonical order
	bytewiseFields := make(fields, len(flds))
	copy(bytewiseFields, flds)
	sort.Sort(&bytewiseFieldSorter{bytewiseFields})

	// Reuse the bytewise ordering unless both integer and string keys are
	// present; only then is a separate length-first sort computed.
	lengthFirstFields := bytewiseFields
	if hasKeyAsInt && hasKeyAsStr {
		lengthFirstFields = make(fields, len(flds))
		copy(lengthFirstFields, flds)
		sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
	}

	structType := &encodingStructType{
		fields:             flds,
		bytewiseFields:     bytewiseFields,
		lengthFirstFields:  lengthFirstFields,
		omitEmptyFieldsIdx: omitEmptyIdx,
	}

	encodingStructTypeCache.Store(t, structType)
	return structType, structType.err
}
|
||||||
|
|
||||||
|
// getEncodingStructToArrayType builds and caches encoding metadata for a
// struct using the "toarray" option; fields are encoded positionally, so no
// CBOR field names are precomputed.
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
	for i := 0; i < len(flds); i++ {
		// Get field's encodeFunc
		flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
		if flds[i].ef == nil {
			// Cache the failure too so repeated lookups don't redo the work.
			structType := &encodingStructType{err: &UnsupportedTypeError{t}}
			encodingStructTypeCache.Store(t, structType)
			return structType, structType.err
		}
	}

	structType := &encodingStructType{
		fields:  flds,
		toArray: true,
	}
	encodingStructTypeCache.Store(t, structType)
	return structType, structType.err
}
|
||||||
|
|
||||||
|
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||||
|
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||||
|
fs := v.(encodeFuncs)
|
||||||
|
return fs.ef, fs.ief
|
||||||
|
}
|
||||||
|
ef, ief := getEncodeFuncInternal(t)
|
||||||
|
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||||
|
return ef, ief
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||||
|
if v, _ := typeInfoCache.Load(t); v != nil {
|
||||||
|
return v.(*typeInfo)
|
||||||
|
}
|
||||||
|
tInfo := newTypeInfo(t)
|
||||||
|
typeInfoCache.Store(t, tInfo)
|
||||||
|
return tInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasToArrayOption reports whether the struct tag contains the ",toarray"
// option, i.e. the first occurrence of ",toarray" is either at the end of the
// tag or immediately followed by another comma-separated option.
func hasToArrayOption(tag string) bool {
	const opt = ",toarray"
	i := strings.Index(tag, opt)
	if i < 0 {
		return false
	}
	rest := tag[i+len(opt):]
	return rest == "" || rest[0] == ','
}
|
182
api/vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
182
api/vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cborType is the major type of a CBOR data item, stored pre-shifted into the
// high-order 3 bits of the initial byte.
type cborType uint8

const (
	cborTypePositiveInt cborType = 0x00
	cborTypeNegativeInt cborType = 0x20
	cborTypeByteString  cborType = 0x40
	cborTypeTextString  cborType = 0x60
	cborTypeArray       cborType = 0x80
	cborTypeMap         cborType = 0xa0
	cborTypeTag         cborType = 0xc0
	cborTypePrimitives  cborType = 0xe0
)

// cborTypeStringNames maps each major type to its human-readable name.
var cborTypeStringNames = map[cborType]string{
	cborTypePositiveInt: "positive integer",
	cborTypeNegativeInt: "negative integer",
	cborTypeByteString:  "byte string",
	cborTypeTextString:  "UTF-8 text string",
	cborTypeArray:       "array",
	cborTypeMap:         "map",
	cborTypeTag:         "tag",
	cborTypePrimitives:  "primitives",
}

// String returns a human-readable name for t, or "Invalid type N" for values
// that are not a defined major type.
func (t cborType) String() string {
	if name, ok := cborTypeStringNames[t]; ok {
		return name
	}
	return "Invalid type " + strconv.Itoa(int(t))
}
|
||||||
|
|
||||||
|
// additionalInformation is the low-order 5 bits of a CBOR initial byte.
type additionalInformation uint8

const (
	// Values 0..23 encode the argument directly in the initial byte; values
	// 24..27 indicate the argument follows in 1, 2, 4, or 8 bytes.
	maxAdditionalInformationWithoutArgument = 23
	additionalInformationWith1ByteArgument  = 24
	additionalInformationWith2ByteArgument  = 25
	additionalInformationWith4ByteArgument  = 26
	additionalInformationWith8ByteArgument  = 27

	// For major type 7.
	additionalInformationAsFalse     = 20
	additionalInformationAsTrue      = 21
	additionalInformationAsNull      = 22
	additionalInformationAsUndefined = 23
	additionalInformationAsFloat16   = 25
	additionalInformationAsFloat32   = 26
	additionalInformationAsFloat64   = 27

	// For major type 2, 3, 4, 5.
	additionalInformationAsIndefiniteLengthFlag = 31
)

const (
	// CBOR simple values 0..23 are carried directly in the additional
	// information; simple values carried in a 1-byte argument start at 32.
	maxSimpleValueInAdditionalInformation = 23
	minSimpleValueIn1ByteArgument         = 32
)

// isIndefiniteLength reports whether ai marks an indefinite-length item.
func (ai additionalInformation) isIndefiniteLength() bool {
	return ai == additionalInformationAsIndefiniteLengthFlag
}
|
||||||
|
|
||||||
|
const (
	// From RFC 8949 Section 3:
	// "The initial byte of each encoded data item contains both information about the major type
	// (the high-order 3 bits, described in Section 3.1) and additional information
	// (the low-order 5 bits)."

	// typeMask is used to extract major type in initial byte of encoded data item.
	typeMask = 0xe0

	// additionalInformationMask is used to extract additional information in initial byte of encoded data item.
	additionalInformationMask = 0x1f
)

// getType extracts the major type (high 3 bits) from the initial byte of an
// encoded data item.
func getType(raw byte) cborType {
	return cborType(raw & typeMask)
}

// getAdditionalInformation extracts the additional information (low 5 bits)
// from the initial byte of an encoded data item.
func getAdditionalInformation(raw byte) byte {
	return raw & additionalInformationMask
}
|
||||||
|
|
||||||
|
// isBreakFlag reports whether raw is the CBOR "break" stop code (0xff) that
// terminates an indefinite-length item.
func isBreakFlag(raw byte) bool {
	return raw == cborBreakFlag
}

// parseInitialByte splits an initial byte into its major type and additional
// information parts.
func parseInitialByte(b byte) (t cborType, ai byte) {
	return getType(b), getAdditionalInformation(b)
}
|
||||||
|
|
||||||
|
// Built-in and well-known CBOR tag numbers handled by this library.
const (
	tagNumRFC3339Time                    = 0
	tagNumEpochTime                      = 1
	tagNumUnsignedBignum                 = 2
	tagNumNegativeBignum                 = 3
	tagNumExpectedLaterEncodingBase64URL = 21
	tagNumExpectedLaterEncodingBase64    = 22
	tagNumExpectedLaterEncodingBase16    = 23
	tagNumSelfDescribedCBOR              = 55799
)

// Initial bytes for the break stop code and indefinite-length heads.
const (
	cborBreakFlag                          = byte(0xff)
	cborByteStringWithIndefiniteLengthHead = byte(0x5f)
	cborTextStringWithIndefiniteLengthHead = byte(0x7f)
	cborArrayWithIndefiniteLengthHead      = byte(0x9f)
	cborMapWithIndefiniteLengthHead        = byte(0xbf)
)

// Pre-encoded CBOR representations of common constant values.
var (
	cborFalse            = []byte{0xf4}
	cborTrue             = []byte{0xf5}
	cborNil              = []byte{0xf6}
	cborNaN              = []byte{0xf9, 0x7e, 0x00}
	cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
	cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
)
|
||||||
|
|
||||||
|
// validBuiltinTag checks that supported built-in tag numbers are followed by
// expected content types. contentHead is the initial byte of the tag's
// content data item. Tag numbers not handled here are accepted (nil result).
func validBuiltinTag(tagNum uint64, contentHead byte) error {
	t := getType(contentHead)
	switch tagNum {
	case tagNumRFC3339Time:
		// Tag content (date/time text string in RFC 3339 format) must be string type.
		if t != cborTypeTextString {
			return newInadmissibleTagContentTypeError(
				tagNumRFC3339Time,
				"text string",
				t.String())
		}
		return nil

	case tagNumEpochTime:
		// Tag content (epoch date/time) must be uint, int, or float type.
		// 0xf9..0xfb are the initial bytes for float16/float32/float64.
		if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
			return newInadmissibleTagContentTypeError(
				tagNumEpochTime,
				"integer or floating-point number",
				t.String())
		}
		return nil

	case tagNumUnsignedBignum, tagNumNegativeBignum:
		// Tag content (bignum) must be byte type.
		if t != cborTypeByteString {
			return newInadmissibleTagContentTypeErrorf(
				fmt.Sprintf(
					"tag number %d or %d must be followed by byte string, got %s",
					tagNumUnsignedBignum,
					tagNumNegativeBignum,
					t.String(),
				))
		}
		return nil

	case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
		// From RFC 8949 3.4.5.2:
		// The data item tagged can be a byte string or any other data item. In the latter
		// case, the tag applies to all of the byte string data items contained in the data
		// item, except for those contained in a nested data item tagged with an expected
		// conversion.
		return nil
	}

	// Tag numbers not listed above are not validated here.
	return nil
}
|
3187
api/vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
3187
api/vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
724
api/vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
724
api/vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
@ -0,0 +1,724 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"unicode/utf16"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/x448/float16"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DiagMode is the main interface for CBOR diagnostic notation.
// Implementations are created from DiagOptions and are safe to reuse.
type DiagMode interface {
	// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
	Diagnose([]byte) (string, error)

	// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
	DiagnoseFirst([]byte) (string, []byte, error)

	// DiagOptions returns user specified options used to create this DiagMode.
	DiagOptions() DiagOptions
}
|
||||||
|
|
||||||
|
// ByteStringEncoding specifies the base encoding that byte strings are notated.
type ByteStringEncoding uint8

const (
	// ByteStringBase16Encoding encodes byte strings in base16, without padding.
	ByteStringBase16Encoding ByteStringEncoding = iota

	// ByteStringBase32Encoding encodes byte strings in base32, without padding.
	ByteStringBase32Encoding

	// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
	ByteStringBase32HexEncoding

	// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
	ByteStringBase64Encoding

	maxByteStringEncoding
)

// valid returns nil when bse is one of the defined encodings, and an error
// naming the out-of-range value otherwise.
func (bse ByteStringEncoding) valid() error {
	if bse < maxByteStringEncoding {
		return nil
	}
	return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
}
|
||||||
|
|
||||||
|
// DiagOptions specifies Diag options.
// The zero value is usable and selects the documented defaults.
type DiagOptions struct {
	// ByteStringEncoding specifies the base encoding that byte strings are notated.
	// Default is ByteStringBase16Encoding.
	ByteStringEncoding ByteStringEncoding

	// ByteStringHexWhitespace specifies notating with whitespace in byte string
	// when ByteStringEncoding is ByteStringBase16Encoding.
	ByteStringHexWhitespace bool

	// ByteStringText specifies notating with text in byte string
	// if it is a valid UTF-8 text.
	ByteStringText bool

	// ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string
	// if it is a valid CBOR bytes.
	ByteStringEmbeddedCBOR bool

	// CBORSequence specifies notating CBOR sequences.
	// Otherwise, diagnosis returns an error if there are more bytes after the first CBOR data item.
	CBORSequence bool

	// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
	// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
	FloatPrecisionIndicator bool

	// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
	// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
	// require larger amounts of stack to deserialize. Don't increase this higher than you require.
	MaxNestedLevels int

	// MaxArrayElements specifies the max number of elements for CBOR arrays.
	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
	MaxArrayElements int

	// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
	MaxMapPairs int
}
|
||||||
|
|
||||||
|
// DiagMode returns a DiagMode with immutable options.
|
||||||
|
func (opts DiagOptions) DiagMode() (DiagMode, error) {
|
||||||
|
return opts.diagMode()
|
||||||
|
}
|
||||||
|
|
||||||
|
// diagMode validates opts and builds an immutable *diagMode, including the
// decoding mode used to traverse the CBOR input.
func (opts DiagOptions) diagMode() (*diagMode, error) {
	if err := opts.ByteStringEncoding.valid(); err != nil {
		return nil, err
	}

	// Decoding limits are enforced by the embedded decMode while parsing.
	decMode, err := DecOptions{
		MaxNestedLevels:  opts.MaxNestedLevels,
		MaxArrayElements: opts.MaxArrayElements,
		MaxMapPairs:      opts.MaxMapPairs,
	}.decMode()
	if err != nil {
		return nil, err
	}

	return &diagMode{
		byteStringEncoding:      opts.ByteStringEncoding,
		byteStringHexWhitespace: opts.ByteStringHexWhitespace,
		byteStringText:          opts.ByteStringText,
		byteStringEmbeddedCBOR:  opts.ByteStringEmbeddedCBOR,
		cborSequence:            opts.CBORSequence,
		floatPrecisionIndicator: opts.FloatPrecisionIndicator,
		decMode:                 decMode,
	}, nil
}
|
||||||
|
|
||||||
|
// diagMode holds the immutable diagnostic-notation options plus the decoding
// mode used to traverse CBOR data items.
type diagMode struct {
	byteStringEncoding      ByteStringEncoding
	byteStringHexWhitespace bool
	byteStringText          bool
	byteStringEmbeddedCBOR  bool
	cborSequence            bool
	floatPrecisionIndicator bool
	decMode                 *decMode
}
|
||||||
|
|
||||||
|
// DiagOptions returns user specified options used to create this DiagMode.
func (dm *diagMode) DiagOptions() DiagOptions {
	return DiagOptions{
		ByteStringEncoding:      dm.byteStringEncoding,
		ByteStringHexWhitespace: dm.byteStringHexWhitespace,
		ByteStringText:          dm.byteStringText,
		ByteStringEmbeddedCBOR:  dm.byteStringEmbeddedCBOR,
		CBORSequence:            dm.cborSequence,
		FloatPrecisionIndicator: dm.floatPrecisionIndicator,
		// Limits are stored on the embedded decMode rather than on diagMode.
		MaxNestedLevels:  dm.decMode.maxNestedLevels,
		MaxArrayElements: dm.decMode.maxArrayElements,
		MaxMapPairs:      dm.decMode.maxMapPairs,
	}
}
|
||||||
|
|
||||||
|
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
|
||||||
|
func (dm *diagMode) Diagnose(data []byte) (string, error) {
|
||||||
|
return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||||
|
func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||||
|
return newDiagnose(data, dm.decMode, dm).diagFirst()
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultDiagMode backs the package-level Diagnose and DiagnoseFirst; built
// from zero-value DiagOptions, which cannot fail validation.
var defaultDiagMode, _ = DiagOptions{}.diagMode()
|
||||||
|
|
||||||
|
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
|
||||||
|
// using the default diagnostic mode.
|
||||||
|
//
|
||||||
|
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
|
||||||
|
func Diagnose(data []byte) (string, error) {
|
||||||
|
return defaultDiagMode.Diagnose(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item
// using the default diagnostic mode. Any remaining bytes are returned in rest.
func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
	return defaultDiagMode.DiagnoseFirst(data)
}
|
||||||
|
|
||||||
|
// diagnose converts CBOR data items to extended diagnostic notation,
// decoding with d under the options in dm and writing output to w.
type diagnose struct {
	dm *diagMode
	d  *decoder
	w  *bytes.Buffer
}
|
||||||
|
|
||||||
|
func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
|
||||||
|
return &diagnose{
|
||||||
|
dm: diagm,
|
||||||
|
d: &decoder{data: data, dm: decm},
|
||||||
|
w: &bytes.Buffer{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// diag writes the EDN of every CBOR data item in the input to di.w,
// comma-separated. With cborSequence false, extra bytes after the first item
// surface as an error from wellformed; with cborSequence true, items are
// notated until the input is exhausted.
func (di *diagnose) diag(cborSequence bool) (string, error) {
	// CBOR Sequence
	firstItem := true
	for {
		switch err := di.wellformed(cborSequence); err {
		case nil:
			if !firstItem {
				di.w.WriteString(", ")
			}
			firstItem = false
			if itemErr := di.item(); itemErr != nil {
				return di.w.String(), itemErr
			}

		case io.EOF:
			// EOF before the first item means the input held no data item at all.
			if firstItem {
				return di.w.String(), err
			}
			return di.w.String(), nil

		default:
			return di.w.String(), err
		}
	}
}
|
||||||
|
|
||||||
|
func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
|
||||||
|
err = di.wellformed(true)
|
||||||
|
if err == nil {
|
||||||
|
err = di.item()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
// Return EDN and the rest of the data slice (which might be len 0)
|
||||||
|
return di.w.String(), di.d.data[di.d.off:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return di.w.String(), nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *diagnose) wellformed(allowExtraData bool) error {
|
||||||
|
off := di.d.off
|
||||||
|
err := di.d.wellformed(allowExtraData, false)
|
||||||
|
di.d.off = off
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// item notates the single CBOR data item at the current decoder offset,
// recursing for nested arrays, maps, tags, and indefinite-length items.
// It assumes the item has already been checked by wellformed.
func (di *diagnose) item() error { //nolint:gocyclo
	initialByte := di.d.data[di.d.off]
	switch initialByte {
	case cborByteStringWithIndefiniteLengthHead,
		cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
		di.d.off++
		if isBreakFlag(di.d.data[di.d.off]) {
			di.d.off++
			switch initialByte {
			case cborByteStringWithIndefiniteLengthHead:
				// indefinite-length bytes with no chunks.
				di.w.WriteString(`''_`)
				return nil
			case cborTextStringWithIndefiniteLengthHead:
				// indefinite-length text with no chunks.
				di.w.WriteString(`""_`)
				return nil
			}
		}

		// One or more chunks: notate as (_ chunk, chunk, ...).
		di.w.WriteString("(_ ")

		i := 0
		for !di.d.foundBreak() {
			if i > 0 {
				di.w.WriteString(", ")
			}

			i++
			// wellformedIndefiniteString() already checked that the next item is a byte/text string.
			if err := di.item(); err != nil {
				return err
			}
		}

		di.w.WriteByte(')')
		return nil

	case cborArrayWithIndefiniteLengthHead: // indefinite-length array
		di.d.off++
		di.w.WriteString("[_ ")

		i := 0
		for !di.d.foundBreak() {
			if i > 0 {
				di.w.WriteString(", ")
			}

			i++
			if err := di.item(); err != nil {
				return err
			}
		}

		di.w.WriteByte(']')
		return nil

	case cborMapWithIndefiniteLengthHead: // indefinite-length map
		di.d.off++
		di.w.WriteString("{_ ")

		i := 0
		for !di.d.foundBreak() {
			if i > 0 {
				di.w.WriteString(", ")
			}

			i++
			// key
			if err := di.item(); err != nil {
				return err
			}

			di.w.WriteString(": ")

			// value
			if err := di.item(); err != nil {
				return err
			}
		}

		di.w.WriteByte('}')
		return nil
	}

	// Definite-length items: dispatch on the CBOR major type.
	t := di.d.nextCBORType()
	switch t {
	case cborTypePositiveInt:
		_, _, val := di.d.getHead()
		di.w.WriteString(strconv.FormatUint(val, 10))
		return nil

	case cborTypeNegativeInt:
		_, _, val := di.d.getHead()
		if val > math.MaxInt64 {
			// CBOR negative integer overflows int64, use big.Int to store value.
			bi := new(big.Int)
			bi.SetUint64(val)
			bi.Add(bi, big.NewInt(1))
			bi.Neg(bi)
			di.w.WriteString(bi.String())
			return nil
		}

		// The encoded value is -1 - val; bitwise NOT of val computes that in int64.
		nValue := int64(-1) ^ int64(val)
		di.w.WriteString(strconv.FormatInt(nValue, 10))
		return nil

	case cborTypeByteString:
		b, _ := di.d.parseByteString()
		return di.encodeByteString(b)

	case cborTypeTextString:
		b, err := di.d.parseTextString()
		if err != nil {
			return err
		}
		return di.encodeTextString(string(b), '"')

	case cborTypeArray:
		_, _, val := di.d.getHead()
		count := int(val)
		di.w.WriteByte('[')

		for i := 0; i < count; i++ {
			if i > 0 {
				di.w.WriteString(", ")
			}
			if err := di.item(); err != nil {
				return err
			}
		}
		di.w.WriteByte(']')
		return nil

	case cborTypeMap:
		_, _, val := di.d.getHead()
		count := int(val)
		di.w.WriteByte('{')

		for i := 0; i < count; i++ {
			if i > 0 {
				di.w.WriteString(", ")
			}
			// key
			if err := di.item(); err != nil {
				return err
			}
			di.w.WriteString(": ")
			// value
			if err := di.item(); err != nil {
				return err
			}
		}
		di.w.WriteByte('}')
		return nil

	case cborTypeTag:
		_, _, tagNum := di.d.getHead()
		switch tagNum {
		case tagNumUnsignedBignum:
			// Tag content must be a byte string holding the magnitude.
			if nt := di.d.nextCBORType(); nt != cborTypeByteString {
				return newInadmissibleTagContentTypeError(
					tagNumUnsignedBignum,
					"byte string",
					nt.String())
			}

			b, _ := di.d.parseByteString()
			bi := new(big.Int).SetBytes(b)
			di.w.WriteString(bi.String())
			return nil

		case tagNumNegativeBignum:
			// Tag content must be a byte string; the value is -1 - content.
			if nt := di.d.nextCBORType(); nt != cborTypeByteString {
				return newInadmissibleTagContentTypeError(
					tagNumNegativeBignum,
					"byte string",
					nt.String(),
				)
			}

			b, _ := di.d.parseByteString()
			bi := new(big.Int).SetBytes(b)
			bi.Add(bi, big.NewInt(1))
			bi.Neg(bi)
			di.w.WriteString(bi.String())
			return nil

		default:
			// Other tags are notated as tagnum(content).
			di.w.WriteString(strconv.FormatUint(tagNum, 10))
			di.w.WriteByte('(')
			if err := di.item(); err != nil {
				return err
			}
			di.w.WriteByte(')')
			return nil
		}

	case cborTypePrimitives:
		_, ai, val := di.d.getHead()
		switch ai {
		case additionalInformationAsFalse:
			di.w.WriteString("false")
			return nil

		case additionalInformationAsTrue:
			di.w.WriteString("true")
			return nil

		case additionalInformationAsNull:
			di.w.WriteString("null")
			return nil

		case additionalInformationAsUndefined:
			di.w.WriteString("undefined")
			return nil

		case additionalInformationAsFloat16,
			additionalInformationAsFloat32,
			additionalInformationAsFloat64:
			return di.encodeFloat(ai, val)

		default:
			// Unassigned/reserved simple values are notated as simple(n).
			di.w.WriteString("simple(")
			di.w.WriteString(strconv.FormatUint(val, 10))
			di.w.WriteByte(')')
			return nil
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// writeU16 formats a UTF-16 code unit as "\uxxxx".
func (di *diagnose) writeU16(val rune) {
	di.w.WriteString("\\u")
	var in [2]byte
	in[0] = byte(val >> 8)
	in[1] = byte(val)
	sz := hex.EncodedLen(len(in))
	di.w.Grow(sz)
	// Hex-encode into the buffer's spare capacity, then append that slice.
	dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
	hex.Encode(dst, in[:])
	di.w.Write(dst)
}
|
||||||
|
|
||||||
|
// Unpadded base32 alphabets used for b32'...' and h32'...' byte string notation.
var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||||
|
|
||||||
|
// encodeByteString notates the byte string val according to the configured
// ByteStringEncoding, preferring text ('...') or embedded-CBOR (<<...>>)
// forms when those options are enabled and applicable.
func (di *diagnose) encodeByteString(val []byte) error {
	if len(val) > 0 {
		// Notate as a text string when enabled and the bytes are valid UTF-8.
		if di.dm.byteStringText && utf8.Valid(val) {
			return di.encodeTextString(string(val), '\'')
		}

		// Notate as embedded CBOR when enabled and the bytes decode cleanly.
		if di.dm.byteStringEmbeddedCBOR {
			di2 := newDiagnose(val, di.dm.decMode, di.dm)
			// should always notating embedded CBOR sequence.
			if str, err := di2.diag(true); err == nil {
				di.w.WriteString("<<")
				di.w.WriteString(str)
				di.w.WriteString(">>")
				return nil
			}
		}
	}

	switch di.dm.byteStringEncoding {
	case ByteStringBase16Encoding:
		di.w.WriteString("h'")
		if di.dm.byteStringHexWhitespace {
			// Reserve 2 hex digits per byte plus one space between bytes.
			sz := hex.EncodedLen(len(val))
			if len(val) > 0 {
				sz += len(val) - 1
			}
			di.w.Grow(sz)

			// Encode into the buffer's spare capacity, then append the result.
			dst := di.w.Bytes()[di.w.Len():]
			for i := range val {
				if i > 0 {
					dst = append(dst, ' ')
				}
				hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
				dst = dst[:len(dst)+2]
			}
			di.w.Write(dst)
		} else {
			sz := hex.EncodedLen(len(val))
			di.w.Grow(sz)
			dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
			hex.Encode(dst, val)
			di.w.Write(dst)
		}
		di.w.WriteByte('\'')
		return nil

	case ByteStringBase32Encoding:
		di.w.WriteString("b32'")
		sz := rawBase32Encoding.EncodedLen(len(val))
		di.w.Grow(sz)
		dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
		rawBase32Encoding.Encode(dst, val)
		di.w.Write(dst)
		di.w.WriteByte('\'')
		return nil

	case ByteStringBase32HexEncoding:
		di.w.WriteString("h32'")
		sz := rawBase32HexEncoding.EncodedLen(len(val))
		di.w.Grow(sz)
		dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
		rawBase32HexEncoding.Encode(dst, val)
		di.w.Write(dst)
		di.w.WriteByte('\'')
		return nil

	case ByteStringBase64Encoding:
		di.w.WriteString("b64'")
		sz := base64.RawURLEncoding.EncodedLen(len(val))
		di.w.Grow(sz)
		dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
		base64.RawURLEncoding.Encode(dst, val)
		di.w.Write(dst)
		di.w.WriteByte('\'')
		return nil

	default:
		// It should not be possible for users to construct a *diagMode with an invalid byte
		// string encoding.
		panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
	}
}
|
||||||
|
|
||||||
|
// utf16SurrSelf is the first code point that requires a UTF-16 surrogate pair.
const utf16SurrSelf = rune(0x10000)
|
||||||
|
|
||||||
|
// encodeTextString notates val as a quoted EDN string, escaping backslash,
// the quote character, and non printable-ASCII characters (the latter as
// \uXXXX UTF-16 code units, with surrogate pairs above U+FFFF).
// quote should be either `'` or `"`
func (di *diagnose) encodeTextString(val string, quote byte) error {
	di.w.WriteByte(quote)

	for i := 0; i < len(val); {
		if b := val[i]; b < utf8.RuneSelf {
			switch {
			case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
				di.w.WriteByte('\\')

				switch b {
				case '\t':
					b = 't'
				case '\n':
					b = 'n'
				case '\r':
					b = 'r'
				}
				di.w.WriteByte(b)

			case b >= ' ' && b <= '~':
				// Printable ASCII is written as-is.
				di.w.WriteByte(b)

			default:
				// Remaining ASCII control characters become \u00XX.
				di.writeU16(rune(b))
			}

			i++
			continue
		}

		c, size := utf8.DecodeRuneInString(val[i:])
		switch {
		case c == utf8.RuneError:
			return &SemanticError{"cbor: invalid UTF-8 string"}

		case c < utf16SurrSelf:
			di.writeU16(c)

		default:
			// Code points above U+FFFF require a UTF-16 surrogate pair.
			c1, c2 := utf16.EncodeRune(c)
			di.writeU16(c1)
			di.writeU16(c2)
		}

		i += size
	}

	di.w.WriteByte(quote)
	return nil
}
|
||||||
|
|
||||||
|
// encodeFloat notates a CBOR float whose raw bits are in val and whose
// precision (half/single/double) is given by ai. NaN and infinities get
// their textual names; finite values use ES6-style formatting, with an
// optional _1/_2/_3 precision suffix.
func (di *diagnose) encodeFloat(ai byte, val uint64) error {
	f64 := float64(0)
	switch ai {
	case additionalInformationAsFloat16:
		f16 := float16.Frombits(uint16(val))
		switch {
		case f16.IsNaN():
			di.w.WriteString("NaN")
			return nil
		case f16.IsInf(1):
			di.w.WriteString("Infinity")
			return nil
		case f16.IsInf(-1):
			di.w.WriteString("-Infinity")
			return nil
		default:
			f64 = float64(f16.Float32())
		}

	case additionalInformationAsFloat32:
		f32 := math.Float32frombits(uint32(val))
		switch {
		case f32 != f32: // NaN is the only value unequal to itself.
			di.w.WriteString("NaN")
			return nil
		case f32 > math.MaxFloat32:
			di.w.WriteString("Infinity")
			return nil
		case f32 < -math.MaxFloat32:
			di.w.WriteString("-Infinity")
			return nil
		default:
			f64 = float64(f32)
		}

	case additionalInformationAsFloat64:
		f64 = math.Float64frombits(val)
		switch {
		case f64 != f64: // NaN
			di.w.WriteString("NaN")
			return nil
		case f64 > math.MaxFloat64:
			di.w.WriteString("Infinity")
			return nil
		case f64 < -math.MaxFloat64:
			di.w.WriteString("-Infinity")
			return nil
		}
	}
	// Use ES6 number to string conversion which should match most JSON generators.
	// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
	const bitSize = 64
	b := make([]byte, 0, 32)
	if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
		// clean up e-09 to e-9
		n := len(b)
		if n >= 4 && string(b[n-4:n-1]) == "e-0" {
			b = append(b[:n-2], b[n-1])
		}
	} else {
		b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
	}

	// add decimal point and trailing zero if needed
	if bytes.IndexByte(b, '.') < 0 {
		if i := bytes.IndexByte(b, 'e'); i < 0 {
			b = append(b, '.', '0')
		} else {
			// Insert ".0" before the exponent, e.g. 1e+21 -> 1.0e+21.
			b = append(b[:i+2], b[i:]...)
			b[i] = '.'
			b[i+1] = '0'
		}
	}

	di.w.WriteString(string(b))

	if di.dm.floatPrecisionIndicator {
		switch ai {
		case additionalInformationAsFloat16:
			di.w.WriteString("_1")
			return nil

		case additionalInformationAsFloat32:
			di.w.WriteString("_2")
			return nil

		case additionalInformationAsFloat64:
			di.w.WriteString("_3")
			return nil
		}
	}

	return nil
}
|
129
api/vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
129
api/vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
|
||||||
|
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
|
||||||
|
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
|
||||||
|
|
||||||
|
Encoding options allow "preferred serialization" by encoding integers and floats
|
||||||
|
to their smallest forms (e.g. float16) when values fit.
|
||||||
|
|
||||||
|
Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
|
||||||
|
and easier to use with structs.
|
||||||
|
|
||||||
|
For example, "toarray" tag makes struct fields encode to CBOR array elements. And
|
||||||
|
"keyasint" makes a field encode to an element of CBOR map with specified int key.
|
||||||
|
|
||||||
|
Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
|
||||||
|
|
||||||
|
# Basics
|
||||||
|
|
||||||
|
The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
|
||||||
|
|
||||||
|
Function signatures identical to encoding/json include:
|
||||||
|
|
||||||
|
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
|
||||||
|
|
||||||
|
Standard interfaces include:
|
||||||
|
|
||||||
|
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
|
||||||
|
|
||||||
|
Custom encoding and decoding is possible by implementing standard interfaces for
|
||||||
|
user-defined Go types.
|
||||||
|
|
||||||
|
Codec functions are available at package-level (using defaults options) or by
|
||||||
|
creating modes from options at runtime.
|
||||||
|
|
||||||
|
"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).
|
||||||
|
|
||||||
|
EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
|
||||||
|
|
||||||
|
em, err := cbor.EncOptions{...}.EncMode()
|
||||||
|
em, err := cbor.CanonicalEncOptions().EncMode()
|
||||||
|
em, err := cbor.CTAP2EncOptions().EncMode()
|
||||||
|
|
||||||
|
Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
|
||||||
|
modes won't accidentally change at runtime after they're created.
|
||||||
|
|
||||||
|
Modes are intended to be reused and are safe for concurrent use.
|
||||||
|
|
||||||
|
EncMode and DecMode Interfaces
|
||||||
|
|
||||||
|
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||||
|
type EncMode interface {
|
||||||
|
Marshal(v interface{}) ([]byte, error)
|
||||||
|
NewEncoder(w io.Writer) *Encoder
|
||||||
|
EncOptions() EncOptions // returns copy of options
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||||
|
type DecMode interface {
|
||||||
|
Unmarshal(data []byte, v interface{}) error
|
||||||
|
NewDecoder(r io.Reader) *Decoder
|
||||||
|
DecOptions() DecOptions // returns copy of options
|
||||||
|
}
|
||||||
|
|
||||||
|
Using Default Encoding Mode
|
||||||
|
|
||||||
|
b, err := cbor.Marshal(v)
|
||||||
|
|
||||||
|
encoder := cbor.NewEncoder(w)
|
||||||
|
err = encoder.Encode(v)
|
||||||
|
|
||||||
|
Using Default Decoding Mode
|
||||||
|
|
||||||
|
err := cbor.Unmarshal(b, &v)
|
||||||
|
|
||||||
|
decoder := cbor.NewDecoder(r)
|
||||||
|
err = decoder.Decode(&v)
|
||||||
|
|
||||||
|
Creating and Using Encoding Modes
|
||||||
|
|
||||||
|
// Create EncOptions using either struct literal or a function.
|
||||||
|
opts := cbor.CanonicalEncOptions()
|
||||||
|
|
||||||
|
// If needed, modify encoding options
|
||||||
|
opts.Time = cbor.TimeUnix
|
||||||
|
|
||||||
|
// Create reusable EncMode interface with immutable options, safe for concurrent use.
|
||||||
|
em, err := opts.EncMode()
|
||||||
|
|
||||||
|
// Use EncMode like encoding/json, with same function signatures.
|
||||||
|
b, err := em.Marshal(v)
|
||||||
|
// or
|
||||||
|
encoder := em.NewEncoder(w)
|
||||||
|
err := encoder.Encode(v)
|
||||||
|
|
||||||
|
// NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
|
||||||
|
// specified during creation of em (encoding mode).
|
||||||
|
|
||||||
|
# CBOR Options
|
||||||
|
|
||||||
|
Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
|
||||||
|
|
||||||
|
Encoding Options: https://github.com/fxamacker/cbor#encoding-options
|
||||||
|
|
||||||
|
Decoding Options: https://github.com/fxamacker/cbor#decoding-options
|
||||||
|
|
||||||
|
# Struct Tags
|
||||||
|
|
||||||
|
Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
|
||||||
|
If both struct tags are specified then `cbor` is used.
|
||||||
|
|
||||||
|
Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
|
||||||
|
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
|
||||||
|
|
||||||
|
For example, "toarray" makes struct fields encode to array elements. And "keyasint"
|
||||||
|
makes struct fields encode to elements of CBOR map with int keys.
|
||||||
|
|
||||||
|
https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
|
||||||
|
|
||||||
|
Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||||
|
|
||||||
|
# Tests and Fuzzing
|
||||||
|
|
||||||
|
Over 375 tests are included in this package. Cover-guided fuzzing is handled by
|
||||||
|
a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
|
||||||
|
*/
|
||||||
|
package cbor
|
1989
api/vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
1989
api/vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
94
api/vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
94
api/vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
//go:build go1.20
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mapKeyValueEncodeFunc encodes a map's keys and values with kf and ef,
// using pooled reflect.Value scratch space during iteration.
type mapKeyValueEncodeFunc struct {
	kf, ef       encodeFunc
	kpool, vpool sync.Pool
}
|
||||||
|
|
||||||
|
// encodeKeyValues encodes every key-value pair of map v to e. When kvs is
// non-nil it also records each pair's byte offsets (relative to the start of
// this map's content) so the caller can reorder pairs afterwards.
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	// Reuse pooled reflect.Values as iteration scratch; zero them before
	// returning to the pool so they don't keep the last map entry alive.
	iterk := me.kpool.Get().(*reflect.Value)
	defer func() {
		iterk.SetZero()
		me.kpool.Put(iterk)
	}()
	iterv := me.vpool.Get().(*reflect.Value)
	defer func() {
		iterv.SetZero()
		me.vpool.Put(iterv)
	}()

	if kvs == nil {
		// No offset bookkeeping requested: just encode pairs in map order.
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			iterk.SetIterKey(iter)
			iterv.SetIterValue(iter)

			if err := me.kf(e, em, *iterk); err != nil {
				return err
			}
			if err := me.ef(e, em, *iterv); err != nil {
				return err
			}
		}
		return nil
	}

	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		iterk.SetIterKey(iter)
		iterv.SetIterValue(iter)

		offset := e.Len()
		if err := me.kf(e, em, *iterk); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, *iterv); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// getEncodeMapFunc returns an encodeFunc for map type t, or nil if either
// the key or element type has no encode function.
func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{
		kf: kf,
		ef: ef,
		// Pools of addressable reflect.Values used as iteration scratch
		// space by encodeKeyValues.
		kpool: sync.Pool{
			New: func() interface{} {
				rk := reflect.New(t.Key()).Elem()
				return &rk
			},
		},
		vpool: sync.Pool{
			New: func() interface{} {
				rv := reflect.New(t.Elem()).Elem()
				return &rv
			},
		},
	}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
|
60
api/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
Normal file
60
api/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
//go:build !go1.20
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mapKeyValueEncodeFunc encodes a map's keys and values with kf and ef
// (pre-go1.20 variant without pooled reflect.Value scratch space).
type mapKeyValueEncodeFunc struct {
	kf, ef encodeFunc
}
|
||||||
|
|
||||||
|
// encodeKeyValues encodes every key-value pair of map v to e. When kvs is
// non-nil it also records each pair's byte offsets (relative to the start of
// this map's content) so the caller can reorder pairs afterwards.
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	if kvs == nil {
		// No offset bookkeeping requested: just encode pairs in map order.
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			if err := me.kf(e, em, iter.Key()); err != nil {
				return err
			}
			if err := me.ef(e, em, iter.Value()); err != nil {
				return err
			}
		}
		return nil
	}

	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		offset := e.Len()
		if err := me.kf(e, em, iter.Key()); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, iter.Value()); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// getEncodeMapFunc returns an encodeFunc for map type t, or nil if either
// the key or element type has no encode function.
func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
|
69
api/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
Normal file
69
api/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SimpleValue represents CBOR simple value.
|
||||||
|
// CBOR simple value is:
|
||||||
|
// - an extension point like CBOR tag.
|
||||||
|
// - a subset of CBOR major type 7 that isn't floating-point.
|
||||||
|
// - "identified by a number between 0 and 255, but distinct from that number itself".
|
||||||
|
// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
|
||||||
|
//
|
||||||
|
// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined".
|
||||||
|
// Other CBOR simple values are currently unassigned/reserved by IANA.
|
||||||
|
type SimpleValue uint8
|
||||||
|
|
||||||
|
var (
|
||||||
|
typeSimpleValue = reflect.TypeOf(SimpleValue(0))
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
|
||||||
|
func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
|
||||||
|
// RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
|
||||||
|
// "An encoder MUST NOT issue two-byte sequences that start with 0xf8
|
||||||
|
// (major type 7, additional information 24) and continue with a byte
|
||||||
|
// less than 0x20 (32 decimal). Such sequences are not well-formed.
|
||||||
|
// (This implies that an encoder cannot encode false, true, null, or
|
||||||
|
// undefined in two-byte sequences and that only the one-byte variants
|
||||||
|
// of these are well-formed; more generally speaking, each simple value
|
||||||
|
// only has a single representation variant)."
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case sv <= maxSimpleValueInAdditionalInformation:
|
||||||
|
return []byte{byte(cborTypePrimitives) | byte(sv)}, nil
|
||||||
|
|
||||||
|
case sv >= minSimpleValueIn1ByteArgument:
|
||||||
|
return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
|
||||||
|
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
|
||||||
|
if sv == nil {
|
||||||
|
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
d := decoder{data: data, dm: defaultDecMode}
|
||||||
|
|
||||||
|
typ, ai, val := d.getHead()
|
||||||
|
|
||||||
|
if typ != cborTypePrimitives {
|
||||||
|
return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
|
||||||
|
}
|
||||||
|
if ai > additionalInformationWith1ByteArgument {
|
||||||
|
return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// It is safe to cast val to uint8 here because
|
||||||
|
// - data is already verified to be well-formed CBOR simple value and
|
||||||
|
// - val is <= math.MaxUint8.
|
||||||
|
*sv = SimpleValue(val)
|
||||||
|
return nil
|
||||||
|
}
|
277
api/vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
Normal file
277
api/vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Decoder reads and decodes CBOR values from io.Reader.
|
||||||
|
type Decoder struct {
|
||||||
|
r io.Reader
|
||||||
|
d decoder
|
||||||
|
buf []byte
|
||||||
|
off int // next read offset in buf
|
||||||
|
bytesRead int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a new decoder that reads and decodes from r using
|
||||||
|
// the default decoding options.
|
||||||
|
func NewDecoder(r io.Reader) *Decoder {
|
||||||
|
return defaultDecMode.NewDecoder(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode reads CBOR value and decodes it into the value pointed to by v.
|
||||||
|
func (dec *Decoder) Decode(v interface{}) error {
|
||||||
|
_, err := dec.readNext()
|
||||||
|
if err != nil {
|
||||||
|
// Return validation error or read error.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dec.d.reset(dec.buf[dec.off:])
|
||||||
|
err = dec.d.value(v)
|
||||||
|
|
||||||
|
// Increment dec.off even if decoding err is not nil because
|
||||||
|
// dec.d.off points to the next CBOR data item if current
|
||||||
|
// CBOR data item is valid but failed to be decoded into v.
|
||||||
|
// This allows next CBOR data item to be decoded in next
|
||||||
|
// call to this function.
|
||||||
|
dec.off += dec.d.off
|
||||||
|
dec.bytesRead += dec.d.off
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip skips to the next CBOR data item (if there is any),
|
||||||
|
// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc.
|
||||||
|
func (dec *Decoder) Skip() error {
|
||||||
|
n, err := dec.readNext()
|
||||||
|
if err != nil {
|
||||||
|
// Return validation error or read error.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dec.off += n
|
||||||
|
dec.bytesRead += n
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NumBytesRead returns the number of bytes read.
|
||||||
|
func (dec *Decoder) NumBytesRead() int {
|
||||||
|
return dec.bytesRead
|
||||||
|
}
|
||||||
|
|
||||||
|
// Buffered returns a reader for data remaining in Decoder's buffer.
|
||||||
|
// Returned reader is valid until the next call to Decode or Skip.
|
||||||
|
func (dec *Decoder) Buffered() io.Reader {
|
||||||
|
return bytes.NewReader(dec.buf[dec.off:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// readNext() reads next CBOR data item from Reader to buffer.
|
||||||
|
// It returns the size of next CBOR data item.
|
||||||
|
// It also returns validation error or read error if any.
|
||||||
|
func (dec *Decoder) readNext() (int, error) {
|
||||||
|
var readErr error
|
||||||
|
var validErr error
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Process any unread data in dec.buf.
|
||||||
|
if dec.off < len(dec.buf) {
|
||||||
|
dec.d.reset(dec.buf[dec.off:])
|
||||||
|
off := dec.off // Save offset before data validation
|
||||||
|
validErr = dec.d.wellformed(true, false)
|
||||||
|
dec.off = off // Restore offset
|
||||||
|
|
||||||
|
if validErr == nil {
|
||||||
|
return dec.d.off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if validErr != io.ErrUnexpectedEOF {
|
||||||
|
return 0, validErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process last read error on io.ErrUnexpectedEOF.
|
||||||
|
if readErr != nil {
|
||||||
|
if readErr == io.EOF {
|
||||||
|
// current CBOR data item is incomplete.
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return 0, readErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// More data is needed and there was no read error.
|
||||||
|
var n int
|
||||||
|
for n == 0 {
|
||||||
|
n, readErr = dec.read()
|
||||||
|
if n == 0 && readErr != nil {
|
||||||
|
// No more data can be read and read error is encountered.
|
||||||
|
// At this point, validErr is either nil or io.ErrUnexpectedEOF.
|
||||||
|
if readErr == io.EOF {
|
||||||
|
if validErr == io.ErrUnexpectedEOF {
|
||||||
|
// current CBOR data item is incomplete.
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, readErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point, dec.buf contains new data from last read (n > 0).
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// read() reads data from Reader to buffer.
|
||||||
|
// It returns number of bytes read and any read error encountered.
|
||||||
|
// Postconditions:
|
||||||
|
// - dec.buf contains previously unread data and new data.
|
||||||
|
// - dec.off is 0.
|
||||||
|
func (dec *Decoder) read() (int, error) {
|
||||||
|
// Grow buf if needed.
|
||||||
|
const minRead = 512
|
||||||
|
if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
|
||||||
|
oldUnreadBuf := dec.buf[dec.off:]
|
||||||
|
dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
|
||||||
|
dec.overwriteBuf(oldUnreadBuf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy unread data over read data and reset off to 0.
|
||||||
|
if dec.off > 0 {
|
||||||
|
dec.overwriteBuf(dec.buf[dec.off:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read from reader and reslice buf.
|
||||||
|
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||||
|
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) overwriteBuf(newBuf []byte) {
|
||||||
|
n := copy(dec.buf, newBuf)
|
||||||
|
dec.buf = dec.buf[:n]
|
||||||
|
dec.off = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encoder writes CBOR values to io.Writer.
|
||||||
|
type Encoder struct {
|
||||||
|
w io.Writer
|
||||||
|
em *encMode
|
||||||
|
indefTypes []cborType
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns a new encoder that writes to w using the default encoding options.
|
||||||
|
func NewEncoder(w io.Writer) *Encoder {
|
||||||
|
return defaultEncMode.NewEncoder(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode writes the CBOR encoding of v.
|
||||||
|
func (enc *Encoder) Encode(v interface{}) error {
|
||||||
|
if len(enc.indefTypes) > 0 && v != nil {
|
||||||
|
indefType := enc.indefTypes[len(enc.indefTypes)-1]
|
||||||
|
if indefType == cborTypeTextString {
|
||||||
|
k := reflect.TypeOf(v).Kind()
|
||||||
|
if k != reflect.String {
|
||||||
|
return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
|
||||||
|
}
|
||||||
|
} else if indefType == cborTypeByteString {
|
||||||
|
t := reflect.TypeOf(v)
|
||||||
|
k := t.Kind()
|
||||||
|
if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
|
||||||
|
return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := getEncodeBuffer()
|
||||||
|
|
||||||
|
err := encode(buf, enc.em, reflect.ValueOf(v))
|
||||||
|
if err == nil {
|
||||||
|
_, err = enc.w.Write(buf.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
putEncodeBuffer(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartIndefiniteByteString starts byte string encoding of indefinite length.
|
||||||
|
// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings
|
||||||
|
// ("chunks") as one contiguous string until EndIndefinite is called.
|
||||||
|
func (enc *Encoder) StartIndefiniteByteString() error {
|
||||||
|
return enc.startIndefinite(cborTypeByteString)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartIndefiniteTextString starts text string encoding of indefinite length.
|
||||||
|
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings
|
||||||
|
// ("chunks") as one contiguous string until EndIndefinite is called.
|
||||||
|
func (enc *Encoder) StartIndefiniteTextString() error {
|
||||||
|
return enc.startIndefinite(cborTypeTextString)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartIndefiniteArray starts array encoding of indefinite length.
|
||||||
|
// Subsequent calls of (*Encoder).Encode() encodes elements of the array
|
||||||
|
// until EndIndefinite is called.
|
||||||
|
func (enc *Encoder) StartIndefiniteArray() error {
|
||||||
|
return enc.startIndefinite(cborTypeArray)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartIndefiniteMap starts array encoding of indefinite length.
|
||||||
|
// Subsequent calls of (*Encoder).Encode() encodes elements of the map
|
||||||
|
// until EndIndefinite is called.
|
||||||
|
func (enc *Encoder) StartIndefiniteMap() error {
|
||||||
|
return enc.startIndefinite(cborTypeMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndIndefinite closes last opened indefinite length value.
|
||||||
|
func (enc *Encoder) EndIndefinite() error {
|
||||||
|
if len(enc.indefTypes) == 0 {
|
||||||
|
return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
|
||||||
|
}
|
||||||
|
_, err := enc.w.Write([]byte{cborBreakFlag})
|
||||||
|
if err == nil {
|
||||||
|
enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var cborIndefHeader = map[cborType][]byte{
|
||||||
|
cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
|
||||||
|
cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
|
||||||
|
cborTypeArray: {cborArrayWithIndefiniteLengthHead},
|
||||||
|
cborTypeMap: {cborMapWithIndefiniteLengthHead},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (enc *Encoder) startIndefinite(typ cborType) error {
|
||||||
|
if enc.em.indefLength == IndefLengthForbidden {
|
||||||
|
return &IndefiniteLengthError{typ}
|
||||||
|
}
|
||||||
|
_, err := enc.w.Write(cborIndefHeader[typ])
|
||||||
|
if err == nil {
|
||||||
|
enc.indefTypes = append(enc.indefTypes, typ)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawMessage is a raw encoded CBOR value.
|
||||||
|
type RawMessage []byte
|
||||||
|
|
||||||
|
// MarshalCBOR returns m or CBOR nil if m is nil.
|
||||||
|
func (m RawMessage) MarshalCBOR() ([]byte, error) {
|
||||||
|
if len(m) == 0 {
|
||||||
|
return cborNil, nil
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalCBOR creates a copy of data and saves to *m.
|
||||||
|
func (m *RawMessage) UnmarshalCBOR(data []byte) error {
|
||||||
|
if m == nil {
|
||||||
|
return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
*m = append((*m)[0:0], data...)
|
||||||
|
return nil
|
||||||
|
}
|
260
api/vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
Normal file
260
api/vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
Normal file
@ -0,0 +1,260 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type field struct {
|
||||||
|
name string
|
||||||
|
nameAsInt int64 // used to decoder to match field name with CBOR int
|
||||||
|
cborName []byte
|
||||||
|
cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
|
||||||
|
idx []int
|
||||||
|
typ reflect.Type
|
||||||
|
ef encodeFunc
|
||||||
|
ief isEmptyFunc
|
||||||
|
typInfo *typeInfo // used to decoder to reuse type info
|
||||||
|
tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
|
||||||
|
omitEmpty bool // used to skip empty field
|
||||||
|
keyAsInt bool // used to encode/decode field name as int
|
||||||
|
}
|
||||||
|
|
||||||
|
type fields []*field
|
||||||
|
|
||||||
|
// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
|
||||||
|
type indexFieldSorter struct {
|
||||||
|
fields fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *indexFieldSorter) Len() int {
|
||||||
|
return len(x.fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *indexFieldSorter) Swap(i, j int) {
|
||||||
|
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *indexFieldSorter) Less(i, j int) bool {
|
||||||
|
iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
|
||||||
|
for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
|
||||||
|
if iIdx[k] != jIdx[k] {
|
||||||
|
return iIdx[k] < jIdx[k]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(iIdx) <= len(jIdx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
|
||||||
|
type nameLevelAndTagFieldSorter struct {
|
||||||
|
fields fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *nameLevelAndTagFieldSorter) Len() int {
|
||||||
|
return len(x.fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
|
||||||
|
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
|
||||||
|
fi, fj := x.fields[i], x.fields[j]
|
||||||
|
if fi.name != fj.name {
|
||||||
|
return fi.name < fj.name
|
||||||
|
}
|
||||||
|
if len(fi.idx) != len(fj.idx) {
|
||||||
|
return len(fi.idx) < len(fj.idx)
|
||||||
|
}
|
||||||
|
if fi.tagged != fj.tagged {
|
||||||
|
return fi.tagged
|
||||||
|
}
|
||||||
|
return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
|
||||||
|
func getFields(t reflect.Type) (flds fields, structOptions string) {
|
||||||
|
// Get special field "_" tag options
|
||||||
|
if f, ok := t.FieldByName("_"); ok {
|
||||||
|
tag := f.Tag.Get("cbor")
|
||||||
|
if tag != "-" {
|
||||||
|
structOptions = tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// nTypes contains next level anonymous fields' types and indexes
|
||||||
|
// (there can be multiple fields of the same type at the same level)
|
||||||
|
flds, nTypes := appendFields(t, nil, nil, nil)
|
||||||
|
|
||||||
|
if len(nTypes) > 0 {
|
||||||
|
|
||||||
|
var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
|
||||||
|
vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels
|
||||||
|
|
||||||
|
for len(nTypes) > 0 {
|
||||||
|
cTypes, nTypes = nTypes, nil
|
||||||
|
|
||||||
|
for t, idx := range cTypes {
|
||||||
|
// If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
|
||||||
|
if len(idx) > 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Anonymous field of the same type at deeper nested level is ignored.
|
||||||
|
if vTypes[t] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
vTypes[t] = true
|
||||||
|
|
||||||
|
flds, nTypes = appendFields(t, idx[0], flds, nTypes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(&nameLevelAndTagFieldSorter{flds})
|
||||||
|
|
||||||
|
// Keep visible fields.
|
||||||
|
j := 0 // index of next unique field
|
||||||
|
for i := 0; i < len(flds); {
|
||||||
|
name := flds[i].name
|
||||||
|
if i == len(flds)-1 || // last field
|
||||||
|
name != flds[i+1].name || // field i has unique field name
|
||||||
|
len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
|
||||||
|
(flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
|
||||||
|
flds[j] = flds[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip fields with the same field name.
|
||||||
|
for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if j != len(flds) {
|
||||||
|
flds = flds[:j]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort fields by field index
|
||||||
|
sort.Sort(&indexFieldSorter{flds})
|
||||||
|
|
||||||
|
return flds, structOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes .
|
||||||
|
func appendFields(
|
||||||
|
t reflect.Type,
|
||||||
|
idx []int,
|
||||||
|
flds fields,
|
||||||
|
nTypes map[reflect.Type][][]int,
|
||||||
|
) (
|
||||||
|
_flds fields,
|
||||||
|
_nTypes map[reflect.Type][][]int,
|
||||||
|
) {
|
||||||
|
for i := 0; i < t.NumField(); i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
|
||||||
|
ft := f.Type
|
||||||
|
for ft.Kind() == reflect.Ptr {
|
||||||
|
ft = ft.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isFieldExportable(f, ft.Kind()) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tag := f.Tag.Get("cbor")
|
||||||
|
if tag == "" {
|
||||||
|
tag = f.Tag.Get("json")
|
||||||
|
}
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tagged := tag != ""
|
||||||
|
|
||||||
|
// Parse field tag options
|
||||||
|
var tagFieldName string
|
||||||
|
var omitempty, keyasint bool
|
||||||
|
for j := 0; tag != ""; j++ {
|
||||||
|
var token string
|
||||||
|
idx := strings.IndexByte(tag, ',')
|
||||||
|
if idx == -1 {
|
||||||
|
token, tag = tag, ""
|
||||||
|
} else {
|
||||||
|
token, tag = tag[:idx], tag[idx+1:]
|
||||||
|
}
|
||||||
|
if j == 0 {
|
||||||
|
tagFieldName = token
|
||||||
|
} else {
|
||||||
|
switch token {
|
||||||
|
case "omitempty":
|
||||||
|
omitempty = true
|
||||||
|
case "keyasint":
|
||||||
|
keyasint = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldName := tagFieldName
|
||||||
|
if tagFieldName == "" {
|
||||||
|
fieldName = f.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
fIdx := make([]int, len(idx)+1)
|
||||||
|
copy(fIdx, idx)
|
||||||
|
fIdx[len(fIdx)-1] = i
|
||||||
|
|
||||||
|
if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
|
||||||
|
flds = append(flds, &field{
|
||||||
|
name: fieldName,
|
||||||
|
idx: fIdx,
|
||||||
|
typ: f.Type,
|
||||||
|
omitEmpty: omitempty,
|
||||||
|
keyAsInt: keyasint,
|
||||||
|
tagged: tagged})
|
||||||
|
} else {
|
||||||
|
if nTypes == nil {
|
||||||
|
nTypes = make(map[reflect.Type][][]int)
|
||||||
|
}
|
||||||
|
nTypes[ft] = append(nTypes[ft], fIdx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return flds, nTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
|
||||||
|
// a nonexportable anonymous field of struct type.
|
||||||
|
// Nonexportable anonymous field of struct type can contain exportable fields.
|
||||||
|
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
|
||||||
|
exportable := f.PkgPath == ""
|
||||||
|
return exportable || (f.Anonymous && fk == reflect.Struct)
|
||||||
|
}
|
||||||
|
|
||||||
|
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
|
||||||
|
|
||||||
|
// getFieldValue returns field value of struct v by index. When encountering null pointer
|
||||||
|
// to anonymous (embedded) struct field, f is called with the last traversed field value.
|
||||||
|
func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
|
||||||
|
fv = v
|
||||||
|
for i, n := range idx {
|
||||||
|
fv = fv.Field(n)
|
||||||
|
|
||||||
|
if i < len(idx)-1 {
|
||||||
|
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
|
||||||
|
if fv.IsNil() {
|
||||||
|
// Null pointer to embedded struct field
|
||||||
|
fv, err = f(fv)
|
||||||
|
if err != nil || !fv.IsValid() {
|
||||||
|
return fv, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fv = fv.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fv, nil
|
||||||
|
}
|
299
api/vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
Normal file
299
api/vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
Normal file
@ -0,0 +1,299 @@
|
|||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
|
||||||
|
// unmarshaling of tag content is subject to any encode and decode options that would apply to
|
||||||
|
// enclosed data item if it were to appear outside of a tag.
|
||||||
|
type Tag struct {
|
||||||
|
Number uint64
|
||||||
|
Content interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawTag represents CBOR tag data, including tag number and raw tag content.
|
||||||
|
// RawTag implements Unmarshaler and Marshaler interfaces.
|
||||||
|
type RawTag struct {
|
||||||
|
Number uint64
|
||||||
|
Content RawMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
|
||||||
|
func (t *RawTag) UnmarshalCBOR(data []byte) error {
|
||||||
|
if t == nil {
|
||||||
|
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decoding CBOR null and undefined to cbor.RawTag is no-op.
|
||||||
|
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d := decoder{data: data, dm: defaultDecMode}
|
||||||
|
|
||||||
|
// Unmarshal tag number.
|
||||||
|
typ, _, num := d.getHead()
|
||||||
|
if typ != cborTypeTag {
|
||||||
|
return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
|
||||||
|
}
|
||||||
|
t.Number = num
|
||||||
|
|
||||||
|
// Unmarshal tag content.
|
||||||
|
c := d.data[d.off:]
|
||||||
|
t.Content = make([]byte, len(c))
|
||||||
|
copy(t.Content, c)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalCBOR returns CBOR encoding of t.
|
||||||
|
func (t RawTag) MarshalCBOR() ([]byte, error) {
|
||||||
|
if t.Number == 0 && len(t.Content) == 0 {
|
||||||
|
// Marshal uninitialized cbor.RawTag
|
||||||
|
b := make([]byte, len(cborNil))
|
||||||
|
copy(b, cborNil)
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
e := getEncodeBuffer()
|
||||||
|
|
||||||
|
encodeHead(e, byte(cborTypeTag), t.Number)
|
||||||
|
|
||||||
|
content := t.Content
|
||||||
|
if len(content) == 0 {
|
||||||
|
content = cborNil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, len(e.Bytes())+len(content))
|
||||||
|
n := copy(buf, e.Bytes())
|
||||||
|
copy(buf[n:], content)
|
||||||
|
|
||||||
|
putEncodeBuffer(e)
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecTagMode specifies how decoder handles tag number.
|
||||||
|
type DecTagMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DecTagIgnored makes decoder ignore tag number (skips if present).
|
||||||
|
DecTagIgnored DecTagMode = iota
|
||||||
|
|
||||||
|
// DecTagOptional makes decoder verify tag number if it's present.
|
||||||
|
DecTagOptional
|
||||||
|
|
||||||
|
// DecTagRequired makes decoder verify tag number and tag number must be present.
|
||||||
|
DecTagRequired
|
||||||
|
|
||||||
|
maxDecTagMode
|
||||||
|
)
|
||||||
|
|
||||||
|
func (dtm DecTagMode) valid() bool {
|
||||||
|
return dtm >= 0 && dtm < maxDecTagMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncTagMode specifies how encoder handles tag number.
|
||||||
|
type EncTagMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// EncTagNone makes encoder not encode tag number.
|
||||||
|
EncTagNone EncTagMode = iota
|
||||||
|
|
||||||
|
// EncTagRequired makes encoder encode tag number.
|
||||||
|
EncTagRequired
|
||||||
|
|
||||||
|
maxEncTagMode
|
||||||
|
)
|
||||||
|
|
||||||
|
func (etm EncTagMode) valid() bool {
|
||||||
|
return etm >= 0 && etm < maxEncTagMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagOptions specifies how encoder and decoder handle tag number.
|
||||||
|
type TagOptions struct {
|
||||||
|
DecTag DecTagMode
|
||||||
|
EncTag EncTagMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
|
||||||
|
// to provide CBOR tag support.
|
||||||
|
type TagSet interface {
|
||||||
|
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||||
|
Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
|
||||||
|
|
||||||
|
// Remove removes given tag content type from TagSet.
|
||||||
|
Remove(contentType reflect.Type)
|
||||||
|
|
||||||
|
tagProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
type tagProvider interface {
|
||||||
|
getTagItemFromType(t reflect.Type) *tagItem
|
||||||
|
getTypeFromTagNum(num []uint64) reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
type tagItem struct {
|
||||||
|
num []uint64
|
||||||
|
cborTagNum []byte
|
||||||
|
contentType reflect.Type
|
||||||
|
opts TagOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tagItem) equalTagNum(num []uint64) bool {
|
||||||
|
// Fast path to compare 1 tag number
|
||||||
|
if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.num) != len(num) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(t.num); i++ {
|
||||||
|
if t.num[i] != num[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
tagSet map[reflect.Type]*tagItem
|
||||||
|
|
||||||
|
syncTagSet struct {
|
||||||
|
sync.RWMutex
|
||||||
|
t tagSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||||
|
return t[typ]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||||
|
for typ, tag := range t {
|
||||||
|
if tag.equalTagNum(num) {
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTagSet returns TagSet (safe for concurrency).
|
||||||
|
func NewTagSet() TagSet {
|
||||||
|
return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||||
|
func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
|
||||||
|
if contentType == nil {
|
||||||
|
return errors.New("cbor: cannot add nil content type to TagSet")
|
||||||
|
}
|
||||||
|
for contentType.Kind() == reflect.Ptr {
|
||||||
|
contentType = contentType.Elem()
|
||||||
|
}
|
||||||
|
tag, err := newTagItem(opts, contentType, num, nestedNum...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Lock()
|
||||||
|
defer t.Unlock()
|
||||||
|
for typ, ti := range t.t {
|
||||||
|
if typ == contentType {
|
||||||
|
return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
|
||||||
|
}
|
||||||
|
if ti.equalTagNum(tag.num) {
|
||||||
|
return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.t[contentType] = tag
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes given tag content type from TagSet.
|
||||||
|
func (t *syncTagSet) Remove(contentType reflect.Type) {
|
||||||
|
for contentType.Kind() == reflect.Ptr {
|
||||||
|
contentType = contentType.Elem()
|
||||||
|
}
|
||||||
|
t.Lock()
|
||||||
|
delete(t.t, contentType)
|
||||||
|
t.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||||
|
t.RLock()
|
||||||
|
ti := t.t[typ]
|
||||||
|
t.RUnlock()
|
||||||
|
return ti
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||||
|
t.RLock()
|
||||||
|
rt := t.t.getTypeFromTagNum(num)
|
||||||
|
t.RUnlock()
|
||||||
|
return rt
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
|
||||||
|
if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
|
||||||
|
return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
|
||||||
|
}
|
||||||
|
if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
|
||||||
|
return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
|
||||||
|
}
|
||||||
|
if contentType == typeTime {
|
||||||
|
return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||||
|
}
|
||||||
|
if contentType == typeBigInt {
|
||||||
|
return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
|
||||||
|
}
|
||||||
|
if contentType == typeTag {
|
||||||
|
return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
|
||||||
|
}
|
||||||
|
if contentType == typeRawTag {
|
||||||
|
return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
|
||||||
|
}
|
||||||
|
if num == 0 || num == 1 {
|
||||||
|
return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||||
|
}
|
||||||
|
if num == 2 || num == 3 {
|
||||||
|
return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
|
||||||
|
}
|
||||||
|
if num == tagNumSelfDescribedCBOR {
|
||||||
|
return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
|
||||||
|
}
|
||||||
|
|
||||||
|
te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
|
||||||
|
te.num = append(te.num, nestedNum...)
|
||||||
|
|
||||||
|
// Cache encoded tag numbers
|
||||||
|
e := getEncodeBuffer()
|
||||||
|
for _, n := range te.num {
|
||||||
|
encodeHead(e, byte(cborTypeTag), n)
|
||||||
|
}
|
||||||
|
te.cborTagNum = make([]byte, e.Len())
|
||||||
|
copy(te.cborTagNum, e.Bytes())
|
||||||
|
putEncodeBuffer(e)
|
||||||
|
|
||||||
|
return &te, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
typeTag = reflect.TypeOf(Tag{})
|
||||||
|
typeRawTag = reflect.TypeOf(RawTag{})
|
||||||
|
)
|
||||||
|
|
||||||
|
// WrongTagError describes mismatch between CBOR tag and registered tag.
|
||||||
|
type WrongTagError struct {
|
||||||
|
RegisteredType reflect.Type
|
||||||
|
RegisteredTagNum []uint64
|
||||||
|
TagNum []uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *WrongTagError) Error() string {
|
||||||
|
return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
|
||||||
|
}
|
394
api/vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
Normal file
394
api/vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
Normal file
@ -0,0 +1,394 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/x448/float16"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SyntaxError is a description of a CBOR syntax error.
|
||||||
|
type SyntaxError struct {
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SyntaxError) Error() string { return e.msg }
|
||||||
|
|
||||||
|
// SemanticError is a description of a CBOR semantic error.
|
||||||
|
type SemanticError struct {
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SemanticError) Error() string { return e.msg }
|
||||||
|
|
||||||
|
// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
|
||||||
|
type MaxNestedLevelError struct {
|
||||||
|
maxNestedLevels int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MaxNestedLevelError) Error() string {
|
||||||
|
return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
|
||||||
|
type MaxArrayElementsError struct {
|
||||||
|
maxArrayElements int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MaxArrayElementsError) Error() string {
|
||||||
|
return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
|
||||||
|
type MaxMapPairsError struct {
|
||||||
|
maxMapPairs int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MaxMapPairsError) Error() string {
|
||||||
|
return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndefiniteLengthError indicates found disallowed indefinite length items.
|
||||||
|
type IndefiniteLengthError struct {
|
||||||
|
t cborType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IndefiniteLengthError) Error() string {
|
||||||
|
return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagsMdError indicates found disallowed CBOR tags.
|
||||||
|
type TagsMdError struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TagsMdError) Error() string {
|
||||||
|
return "cbor: CBOR tag isn't allowed"
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
|
||||||
|
type ExtraneousDataError struct {
|
||||||
|
numOfBytes int // number of bytes of extraneous data
|
||||||
|
index int // location of extraneous data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExtraneousDataError) Error() string {
|
||||||
|
return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformed checks whether the CBOR data item is well-formed.
|
||||||
|
// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
|
||||||
|
// - use allowExtraData = true when using Decoder.Decode()
|
||||||
|
// - use allowExtraData = false when using Unmarshal()
|
||||||
|
func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
|
||||||
|
if len(d.data) == d.off {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
_, err := d.wellformedInternal(0, checkBuiltinTags)
|
||||||
|
if err == nil {
|
||||||
|
if !allowExtraData && d.off != len(d.data) {
|
||||||
|
err = &ExtraneousDataError{len(d.data) - d.off, d.off}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformedInternal checks data's well-formedness and returns max depth and error.
|
||||||
|
func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
|
||||||
|
t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case cborTypeByteString, cborTypeTextString:
|
||||||
|
if indefiniteLength {
|
||||||
|
if d.dm.indefLength == IndefLengthForbidden {
|
||||||
|
return 0, &IndefiniteLengthError{t}
|
||||||
|
}
|
||||||
|
return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
|
||||||
|
}
|
||||||
|
valInt := int(val)
|
||||||
|
if valInt < 0 {
|
||||||
|
// Detect integer overflow
|
||||||
|
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
|
||||||
|
}
|
||||||
|
if len(d.data)-d.off < valInt { // valInt+off may overflow integer
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
d.off += valInt
|
||||||
|
|
||||||
|
case cborTypeArray, cborTypeMap:
|
||||||
|
depth++
|
||||||
|
if depth > d.dm.maxNestedLevels {
|
||||||
|
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||||
|
}
|
||||||
|
|
||||||
|
if indefiniteLength {
|
||||||
|
if d.dm.indefLength == IndefLengthForbidden {
|
||||||
|
return 0, &IndefiniteLengthError{t}
|
||||||
|
}
|
||||||
|
return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
valInt := int(val)
|
||||||
|
if valInt < 0 {
|
||||||
|
// Detect integer overflow
|
||||||
|
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
|
||||||
|
}
|
||||||
|
|
||||||
|
if t == cborTypeArray {
|
||||||
|
if valInt > d.dm.maxArrayElements {
|
||||||
|
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if valInt > d.dm.maxMapPairs {
|
||||||
|
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
count := 1
|
||||||
|
if t == cborTypeMap {
|
||||||
|
count = 2
|
||||||
|
}
|
||||||
|
maxDepth := depth
|
||||||
|
for j := 0; j < count; j++ {
|
||||||
|
for i := 0; i < valInt; i++ {
|
||||||
|
var dpt int
|
||||||
|
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if dpt > maxDepth {
|
||||||
|
maxDepth = dpt // Save max depth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
depth = maxDepth
|
||||||
|
|
||||||
|
case cborTypeTag:
|
||||||
|
if d.dm.tagsMd == TagsForbidden {
|
||||||
|
return 0, &TagsMdError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
tagNum := val
|
||||||
|
|
||||||
|
// Scan nested tag numbers to avoid recursion.
|
||||||
|
for {
|
||||||
|
if len(d.data) == d.off { // Tag number must be followed by tag content.
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if checkBuiltinTags {
|
||||||
|
err = validBuiltinTag(tagNum, d.data[d.off])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
|
||||||
|
return 0, &UnacceptableDataItemError{
|
||||||
|
CBORType: cborTypeTag.String(),
|
||||||
|
Message: "bignum",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if getType(d.data[d.off]) != cborTypeTag {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if _, _, tagNum, err = d.wellformedHead(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
depth++
|
||||||
|
if depth > d.dm.maxNestedLevels {
|
||||||
|
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Check tag content.
|
||||||
|
return d.wellformedInternal(depth, checkBuiltinTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
return depth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
|
||||||
|
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||||
|
var err error
|
||||||
|
for {
|
||||||
|
if len(d.data) == d.off {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if isBreakFlag(d.data[d.off]) {
|
||||||
|
d.off++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Peek ahead to get next type and indefinite length status.
|
||||||
|
nt, ai := parseInitialByte(d.data[d.off])
|
||||||
|
if t != nt {
|
||||||
|
return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
|
||||||
|
}
|
||||||
|
if additionalInformation(ai).isIndefiniteLength() {
|
||||||
|
return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
|
||||||
|
}
|
||||||
|
if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return depth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
|
||||||
|
func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||||
|
var err error
|
||||||
|
maxDepth := depth
|
||||||
|
i := 0
|
||||||
|
for {
|
||||||
|
if len(d.data) == d.off {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if isBreakFlag(d.data[d.off]) {
|
||||||
|
d.off++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var dpt int
|
||||||
|
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if dpt > maxDepth {
|
||||||
|
maxDepth = dpt
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
if t == cborTypeArray {
|
||||||
|
if i > d.dm.maxArrayElements {
|
||||||
|
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if i%2 == 0 && i/2 > d.dm.maxMapPairs {
|
||||||
|
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if t == cborTypeMap && i%2 == 1 {
|
||||||
|
return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||||
|
}
|
||||||
|
return maxDepth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
|
||||||
|
t cborType,
|
||||||
|
ai byte,
|
||||||
|
val uint64,
|
||||||
|
indefiniteLength bool,
|
||||||
|
err error,
|
||||||
|
) {
|
||||||
|
t, ai, val, err = d.wellformedHead()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
indefiniteLength = additionalInformation(ai).isIndefiniteLength()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
|
||||||
|
dataLen := len(d.data) - d.off
|
||||||
|
if dataLen == 0 {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
t, ai = parseInitialByte(d.data[d.off])
|
||||||
|
val = uint64(ai)
|
||||||
|
d.off++
|
||||||
|
dataLen--
|
||||||
|
|
||||||
|
if ai <= maxAdditionalInformationWithoutArgument {
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith1ByteArgument {
|
||||||
|
const argumentSize = 1
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = uint64(d.data[d.off])
|
||||||
|
d.off++
|
||||||
|
if t == cborTypePrimitives && val < 32 {
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith2ByteArgument {
|
||||||
|
const argumentSize = 2
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
|
||||||
|
d.off += argumentSize
|
||||||
|
if t == cborTypePrimitives {
|
||||||
|
if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith4ByteArgument {
|
||||||
|
const argumentSize = 4
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
|
||||||
|
d.off += argumentSize
|
||||||
|
if t == cborTypePrimitives {
|
||||||
|
if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith8ByteArgument {
|
||||||
|
const argumentSize = 8
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
|
||||||
|
d.off += argumentSize
|
||||||
|
if t == cborTypePrimitives {
|
||||||
|
if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if additionalInformation(ai).isIndefiniteLength() {
|
||||||
|
switch t {
|
||||||
|
case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||||
|
case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ai == 28, 29, 30
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) acceptableFloat(f float64) error {
|
||||||
|
switch {
|
||||||
|
case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
|
||||||
|
return &UnacceptableDataItemError{
|
||||||
|
CBORType: cborTypePrimitives.String(),
|
||||||
|
Message: "floating-point NaN",
|
||||||
|
}
|
||||||
|
case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
|
||||||
|
return &UnacceptableDataItemError{
|
||||||
|
CBORType: cborTypePrimitives.String(),
|
||||||
|
Message: "floating-point infinity",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
1
api/vendor/github.com/go-logr/logr/README.md
generated
vendored
1
api/vendor/github.com/go-logr/logr/README.md
generated
vendored
@ -1,6 +1,7 @@
|
|||||||
# A minimal logging API for Go
|
# A minimal logging API for Go
|
||||||
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
|
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
|
||||||
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
|
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
|
||||||
|
|
||||||
logr offers an(other) opinion on how Go programs and libraries can do logging
|
logr offers an(other) opinion on how Go programs and libraries can do logging
|
||||||
|
13
api/vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
13
api/vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.11.x
|
||||||
|
|
||||||
|
env:
|
||||||
|
- GO111MODULE=on
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -short -coverprofile=coverage.txt -covermode=count ./...
|
||||||
|
|
||||||
|
after_success:
|
||||||
|
- bash <(curl -s https://codecov.io/bash)
|
22
api/vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
22
api/vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
133
api/vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
133
api/vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
# Float16 (Binary16) in Go/Golang
|
||||||
|
[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16)
|
||||||
|
[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16)
|
||||||
|
[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases)
|
||||||
|
[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
|
||||||
|
|
||||||
|
`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
|
||||||
|
|
||||||
|
IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
|
||||||
|
|
||||||
|
All possible 4+ billion floating-point conversions with this library are verified to be correct.
|
||||||
|
|
||||||
|
Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
Current features include:
|
||||||
|
|
||||||
|
* float16 to float32 conversions use lossless conversion.
|
||||||
|
* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
|
||||||
|
* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
|
||||||
|
* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
|
||||||
|
* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
|
||||||
|
* all functions in this library use zero allocs except String().
|
||||||
|
|
||||||
|
## Status
|
||||||
|
This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
|
||||||
|
|
||||||
|
Current status:
|
||||||
|
|
||||||
|
* core API is done and breaking API changes are unlikely.
|
||||||
|
* 100% of unit tests pass:
|
||||||
|
* short mode (`go test -short`) tests around 65765 conversions in 0.005s.
|
||||||
|
* normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
|
||||||
|
* 100% code coverage with both short mode and normal mode.
|
||||||
|
* tested on amd64 but it should work on all little-endian platforms supported by Go.
|
||||||
|
|
||||||
|
Roadmap:
|
||||||
|
|
||||||
|
* add functions for fast batch conversions leveraging SIMD when supported by hardware.
|
||||||
|
* speed up unit test when verifying all possible 4+ billion conversions.
|
||||||
|
* test on additional platforms.
|
||||||
|
|
||||||
|
## Float16 to Float32 Conversion
|
||||||
|
Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
|
||||||
|
|
||||||
|
Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
|
||||||
|
|
||||||
|
## Float32 to Float16 Conversion
|
||||||
|
Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
|
||||||
|
|
||||||
|
Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
|
||||||
|
|
||||||
|
Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
Install with `go get github.com/x448/float16`.
|
||||||
|
```
|
||||||
|
// Convert float32 to float16
|
||||||
|
pi := float32(math.Pi)
|
||||||
|
pi16 := float16.Fromfloat32(pi)
|
||||||
|
|
||||||
|
// Convert float16 to float32
|
||||||
|
pi32 := pi16.Float32()
|
||||||
|
|
||||||
|
// PrecisionFromfloat32() is faster than the overhead of calling a function.
|
||||||
|
// This example only converts if there's no data loss and input is not a subnormal.
|
||||||
|
if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
|
||||||
|
pi16 := float16.Fromfloat32(pi)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Float16 Type and API
|
||||||
|
Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
|
||||||
|
```
|
||||||
|
package float16 // import "github.com/x448/float16"
|
||||||
|
|
||||||
|
// Exported types and consts
|
||||||
|
type Float16 uint16
|
||||||
|
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||||
|
|
||||||
|
// Exported functions
|
||||||
|
Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
|
||||||
|
with identical results to AMD and Intel F16C hardware. NaN inputs
|
||||||
|
are converted with quiet bit always set on, to be like F16C.
|
||||||
|
|
||||||
|
FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
|
||||||
|
// The "ps" suffix means "preserve signaling".
|
||||||
|
// Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
|
||||||
|
|
||||||
|
Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
|
||||||
|
NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
|
||||||
|
Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
|
||||||
|
|
||||||
|
PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
|
||||||
|
// (inline and < 1 ns/op)
|
||||||
|
// Exported methods
|
||||||
|
(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
|
||||||
|
(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
|
||||||
|
(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
|
||||||
|
(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
|
||||||
|
(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
|
||||||
|
(f Float16) IsFinite() bool // true if f is not infinite or NaN
|
||||||
|
(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
|
||||||
|
(f Float16) Signbit() bool // true if f is negative or negative zero
|
||||||
|
(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
|
||||||
|
```
|
||||||
|
See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
|
||||||
|
|
||||||
|
## Benchmarks
|
||||||
|
Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
|
||||||
|
|
||||||
|
```
|
||||||
|
All functions have zero allocations except float16.String().
|
||||||
|
|
||||||
|
FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
|
||||||
|
ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
|
||||||
|
Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
|
||||||
|
|
||||||
|
PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
|
||||||
|
```
|
||||||
|
|
||||||
|
## System Requirements
|
||||||
|
* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
|
||||||
|
* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
|
||||||
|
|
||||||
|
## Special Thanks
|
||||||
|
Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
|
||||||
|
|
||||||
|
## License
|
||||||
|
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||||
|
|
||||||
|
Licensed under [MIT License](LICENSE)
|
302
api/vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
302
api/vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
@ -0,0 +1,302 @@
|
|||||||
|
// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||||
|
//
|
||||||
|
// Special thanks to Kathryn Long for her Rust implementation
|
||||||
|
// of float16 at github.com/starkat99/half-rs (MIT license)
|
||||||
|
|
||||||
|
package float16
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
|
||||||
|
type Float16 uint16
|
||||||
|
|
||||||
|
// Precision indicates whether the conversion to Float16 is
|
||||||
|
// exact, subnormal without dropped bits, inexact, underflow, or overflow.
|
||||||
|
type Precision int
|
||||||
|
|
||||||
|
const (
|
||||||
|
|
||||||
|
// PrecisionExact is for non-subnormals that don't drop bits during conversion.
|
||||||
|
// All of these can round-trip. Should always convert to float16.
|
||||||
|
PrecisionExact Precision = iota
|
||||||
|
|
||||||
|
// PrecisionUnknown is for subnormals that don't drop bits during conversion but
|
||||||
|
// not all of these can round-trip so precision is unknown without more effort.
|
||||||
|
// Only 2046 of these can round-trip and the rest cannot round-trip.
|
||||||
|
PrecisionUnknown
|
||||||
|
|
||||||
|
// PrecisionInexact is for dropped significand bits and cannot round-trip.
|
||||||
|
// Some of these are subnormals. Cannot round-trip float32->float16->float32.
|
||||||
|
PrecisionInexact
|
||||||
|
|
||||||
|
// PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
|
||||||
|
PrecisionUnderflow
|
||||||
|
|
||||||
|
// PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
|
||||||
|
PrecisionOverflow
|
||||||
|
)
|
||||||
|
|
||||||
|
// PrecisionFromfloat32 returns Precision without performing
|
||||||
|
// the conversion. Conversions from both Infinity and NaN
|
||||||
|
// values will always report PrecisionExact even if NaN payload
|
||||||
|
// or NaN-Quiet-Bit is lost. This function is kept simple to
|
||||||
|
// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
|
||||||
|
func PrecisionFromfloat32(f32 float32) Precision {
|
||||||
|
u32 := math.Float32bits(f32)
|
||||||
|
|
||||||
|
if u32 == 0 || u32 == 0x80000000 {
|
||||||
|
// +- zero will always be exact conversion
|
||||||
|
return PrecisionExact
|
||||||
|
}
|
||||||
|
|
||||||
|
const COEFMASK uint32 = 0x7fffff // 23 least significant bits
|
||||||
|
const EXPSHIFT uint32 = 23
|
||||||
|
const EXPBIAS uint32 = 127
|
||||||
|
const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
|
||||||
|
const DROPMASK uint32 = COEFMASK >> 10
|
||||||
|
|
||||||
|
exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
|
||||||
|
coef := u32 & COEFMASK
|
||||||
|
|
||||||
|
if exp == 128 {
|
||||||
|
// +- infinity or NaN
|
||||||
|
// apps may want to do extra checks for NaN separately
|
||||||
|
return PrecisionExact
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
|
||||||
|
// "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
|
||||||
|
if exp < -24 {
|
||||||
|
return PrecisionUnderflow
|
||||||
|
}
|
||||||
|
if exp > 15 {
|
||||||
|
return PrecisionOverflow
|
||||||
|
}
|
||||||
|
if (coef & DROPMASK) != uint32(0) {
|
||||||
|
// these include subnormals and non-subnormals that dropped bits
|
||||||
|
return PrecisionInexact
|
||||||
|
}
|
||||||
|
|
||||||
|
if exp < -14 {
|
||||||
|
// Subnormals. Caller may want to test these further.
|
||||||
|
// There are 2046 subnormals that can successfully round-trip f32->f16->f32
|
||||||
|
// and 20 of those 2046 have 32-bit input coef == 0.
|
||||||
|
// RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
|
||||||
|
// so some protocols and libraries will choose to handle subnormals differently
|
||||||
|
// when deciding to encode them to CBOR float32 vs float16.
|
||||||
|
return PrecisionUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
return PrecisionExact
|
||||||
|
}
|
||||||
|
|
||||||
|
// Frombits returns the float16 number corresponding to the IEEE 754 binary16
|
||||||
|
// representation u16, with the sign bit of u16 and the result in the same bit
|
||||||
|
// position. Frombits(Bits(x)) == x.
|
||||||
|
func Frombits(u16 uint16) Float16 {
|
||||||
|
return Float16(u16)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
|
||||||
|
// IEEE default rounding (nearest int, with ties to even).
|
||||||
|
func Fromfloat32(f32 float32) Float16 {
|
||||||
|
return Float16(f32bitsToF16bits(math.Float32bits(f32)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrInvalidNaNValue indicates a NaN was not received.
|
||||||
|
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||||
|
|
||||||
|
type float16Error string
|
||||||
|
|
||||||
|
func (e float16Error) Error() string { return string(e) }
|
||||||
|
|
||||||
|
// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
|
||||||
|
// signaling and payload. Unlike Fromfloat32(), which can only return
|
||||||
|
// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
|
||||||
|
// If the result is infinity (sNaN with empty payload), then the
|
||||||
|
// lowest bit of payload is set to make the result a NaN.
|
||||||
|
// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
|
||||||
|
// This function was kept simple to be able to inline.
|
||||||
|
func FromNaN32ps(nan float32) (Float16, error) {
|
||||||
|
const SNAN = Float16(uint16(0x7c01)) // signalling NaN
|
||||||
|
|
||||||
|
u32 := math.Float32bits(nan)
|
||||||
|
sign := u32 & 0x80000000
|
||||||
|
exp := u32 & 0x7f800000
|
||||||
|
coef := u32 & 0x007fffff
|
||||||
|
|
||||||
|
if (exp != 0x7f800000) || (coef == 0) {
|
||||||
|
return SNAN, ErrInvalidNaNValue
|
||||||
|
}
|
||||||
|
|
||||||
|
u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
|
||||||
|
|
||||||
|
if (u16 & 0x03ff) == 0 {
|
||||||
|
// result became infinity, make it NaN by setting lowest bit in payload
|
||||||
|
u16 = u16 | 0x0001
|
||||||
|
}
|
||||||
|
|
||||||
|
return Float16(u16), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
|
||||||
|
// Returned NaN value 0x7e01 has all exponent bits = 1 with the
|
||||||
|
// first and last bits = 1 in the significand. This is consistent
|
||||||
|
// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
|
||||||
|
func NaN() Float16 {
|
||||||
|
return Float16(0x7e01)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inf returns a Float16 with an infinity value with the specified sign.
|
||||||
|
// A sign >= returns positive infinity.
|
||||||
|
// A sign < 0 returns negative infinity.
|
||||||
|
func Inf(sign int) Float16 {
|
||||||
|
if sign >= 0 {
|
||||||
|
return Float16(0x7c00)
|
||||||
|
}
|
||||||
|
return Float16(0x8000 | 0x7c00)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float32 returns a float32 converted from f (Float16).
|
||||||
|
// This is a lossless conversion.
|
||||||
|
func (f Float16) Float32() float32 {
|
||||||
|
u32 := f16bitsToF32bits(uint16(f))
|
||||||
|
return math.Float32frombits(u32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
|
||||||
|
// of f and the result in the same bit position. Bits(Frombits(x)) == x.
|
||||||
|
func (f Float16) Bits() uint16 {
|
||||||
|
return uint16(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
|
||||||
|
func (f Float16) IsNaN() bool {
|
||||||
|
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
|
||||||
|
// “not-a-number” value.
|
||||||
|
func (f Float16) IsQuietNaN() bool {
|
||||||
|
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsInf reports whether f is an infinity (inf).
|
||||||
|
// A sign > 0 reports whether f is positive inf.
|
||||||
|
// A sign < 0 reports whether f is negative inf.
|
||||||
|
// A sign == 0 reports whether f is either inf.
|
||||||
|
func (f Float16) IsInf(sign int) bool {
|
||||||
|
return ((f == 0x7c00) && sign >= 0) ||
|
||||||
|
(f == 0xfc00 && sign <= 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFinite returns true if f is neither infinite nor NaN.
|
||||||
|
func (f Float16) IsFinite() bool {
|
||||||
|
return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN.
|
||||||
|
func (f Float16) IsNormal() bool {
|
||||||
|
exp := uint16(f) & uint16(0x7c00)
|
||||||
|
return (exp != uint16(0x7c00)) && (exp != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signbit reports whether f is negative or negative zero.
|
||||||
|
func (f Float16) Signbit() bool {
|
||||||
|
return (uint16(f) & uint16(0x8000)) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// String satisfies the fmt.Stringer interface.
|
||||||
|
func (f Float16) String() string {
|
||||||
|
return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16.
|
||||||
|
func f16bitsToF32bits(in uint16) uint32 {
|
||||||
|
// All 65536 conversions with this were confirmed to be correct
|
||||||
|
// by Montgomery Edwards⁴⁴⁸ (github.com/x448).
|
||||||
|
|
||||||
|
sign := uint32(in&0x8000) << 16 // sign for 32-bit
|
||||||
|
exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit
|
||||||
|
coef := uint32(in&0x03ff) << 13 // significand for 32-bit
|
||||||
|
|
||||||
|
if exp == 0x1f {
|
||||||
|
if coef == 0 {
|
||||||
|
// infinity
|
||||||
|
return sign | 0x7f800000 | coef
|
||||||
|
}
|
||||||
|
// NaN
|
||||||
|
return sign | 0x7fc00000 | coef
|
||||||
|
}
|
||||||
|
|
||||||
|
if exp == 0 {
|
||||||
|
if coef == 0 {
|
||||||
|
// zero
|
||||||
|
return sign
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalize subnormal numbers
|
||||||
|
exp++
|
||||||
|
for coef&0x7f800000 == 0 {
|
||||||
|
coef <<= 1
|
||||||
|
exp--
|
||||||
|
}
|
||||||
|
coef &= 0x007fffff
|
||||||
|
}
|
||||||
|
|
||||||
|
return sign | ((exp + (0x7f - 0xf)) << 23) | coef
|
||||||
|
}
|
||||||
|
|
||||||
|
// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
|
||||||
|
// Conversion rounds to nearest integer with ties to even.
|
||||||
|
func f32bitsToF16bits(u32 uint32) uint16 {
|
||||||
|
// Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
|
||||||
|
// All 4294967296 conversions with this were confirmed to be correct by x448.
|
||||||
|
// Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.
|
||||||
|
|
||||||
|
sign := u32 & 0x80000000
|
||||||
|
exp := u32 & 0x7f800000
|
||||||
|
coef := u32 & 0x007fffff
|
||||||
|
|
||||||
|
if exp == 0x7f800000 {
|
||||||
|
// NaN or Infinity
|
||||||
|
nanBit := uint32(0)
|
||||||
|
if coef != 0 {
|
||||||
|
nanBit = uint32(0x0200)
|
||||||
|
}
|
||||||
|
return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13))
|
||||||
|
}
|
||||||
|
|
||||||
|
halfSign := sign >> 16
|
||||||
|
|
||||||
|
unbiasedExp := int32(exp>>23) - 127
|
||||||
|
halfExp := unbiasedExp + 15
|
||||||
|
|
||||||
|
if halfExp >= 0x1f {
|
||||||
|
return uint16(halfSign | uint32(0x7c00))
|
||||||
|
}
|
||||||
|
|
||||||
|
if halfExp <= 0 {
|
||||||
|
if 14-halfExp > 24 {
|
||||||
|
return uint16(halfSign)
|
||||||
|
}
|
||||||
|
coef := coef | uint32(0x00800000)
|
||||||
|
halfCoef := coef >> uint32(14-halfExp)
|
||||||
|
roundBit := uint32(1) << uint32(13-halfExp)
|
||||||
|
if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
|
||||||
|
halfCoef++
|
||||||
|
}
|
||||||
|
return uint16(halfSign | halfCoef)
|
||||||
|
}
|
||||||
|
|
||||||
|
uHalfExp := uint32(halfExp) << 10
|
||||||
|
halfCoef := coef >> 13
|
||||||
|
roundBit := uint32(0x00001000)
|
||||||
|
if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
|
||||||
|
return uint16((halfSign | uHalfExp | halfCoef) + 1)
|
||||||
|
}
|
||||||
|
return uint16(halfSign | uHalfExp | halfCoef)
|
||||||
|
}
|
13
api/vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
13
api/vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
@ -12,7 +12,7 @@ import (
|
|||||||
"golang.org/x/net/idna"
|
"golang.org/x/net/idna"
|
||||||
)
|
)
|
||||||
|
|
||||||
var isTokenTable = [127]bool{
|
var isTokenTable = [256]bool{
|
||||||
'!': true,
|
'!': true,
|
||||||
'#': true,
|
'#': true,
|
||||||
'$': true,
|
'$': true,
|
||||||
@ -93,12 +93,7 @@ var isTokenTable = [127]bool{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func IsTokenRune(r rune) bool {
|
func IsTokenRune(r rune) bool {
|
||||||
i := int(r)
|
return r < utf8.RuneSelf && isTokenTable[byte(r)]
|
||||||
return i < len(isTokenTable) && isTokenTable[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNotToken(r rune) bool {
|
|
||||||
return !IsTokenRune(r)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeaderValuesContainsToken reports whether any string in values
|
// HeaderValuesContainsToken reports whether any string in values
|
||||||
@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool {
|
|||||||
if len(v) == 0 {
|
if len(v) == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
for _, r := range v {
|
for i := 0; i < len(v); i++ {
|
||||||
if !IsTokenRune(r) {
|
if !isTokenTable[v[i]] {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
13
api/vendor/golang.org/x/net/http2/frame.go
generated
vendored
13
api/vendor/golang.org/x/net/http2/frame.go
generated
vendored
@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool {
|
|||||||
// returned error is ErrFrameTooLarge. Other errors may be of type
|
// returned error is ErrFrameTooLarge. Other errors may be of type
|
||||||
// ConnectionError, StreamError, or anything else from the underlying
|
// ConnectionError, StreamError, or anything else from the underlying
|
||||||
// reader.
|
// reader.
|
||||||
|
//
|
||||||
|
// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
|
||||||
|
// indicates the stream responsible for the error.
|
||||||
func (fr *Framer) ReadFrame() (Frame, error) {
|
func (fr *Framer) ReadFrame() (Frame, error) {
|
||||||
fr.errDetail = nil
|
fr.errDetail = nil
|
||||||
if fr.lastFrame != nil {
|
if fr.lastFrame != nil {
|
||||||
@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int {
|
|||||||
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
|
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
|
||||||
// merge them into the provided hf and returns a MetaHeadersFrame
|
// merge them into the provided hf and returns a MetaHeadersFrame
|
||||||
// with the decoded hpack values.
|
// with the decoded hpack values.
|
||||||
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) {
|
||||||
if fr.AllowIllegalReads {
|
if fr.AllowIllegalReads {
|
||||||
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
|
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
|
||||||
}
|
}
|
||||||
@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
|||||||
}
|
}
|
||||||
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
||||||
// but the structure of the server's frame writer makes this difficult.
|
// but the structure of the server's frame writer makes this difficult.
|
||||||
return nil, ConnectionError(ErrCodeProtocol)
|
return mh, ConnectionError(ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Also close the connection after any CONTINUATION frame following an
|
// Also close the connection after any CONTINUATION frame following an
|
||||||
@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
|||||||
}
|
}
|
||||||
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
||||||
// but the structure of the server's frame writer makes this difficult.
|
// but the structure of the server's frame writer makes this difficult.
|
||||||
return nil, ConnectionError(ErrCodeProtocol)
|
return mh, ConnectionError(ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := hdec.Write(frag); err != nil {
|
if _, err := hdec.Write(frag); err != nil {
|
||||||
return nil, ConnectionError(ErrCodeCompression)
|
return mh, ConnectionError(ErrCodeCompression)
|
||||||
}
|
}
|
||||||
|
|
||||||
if hc.HeadersEnded() {
|
if hc.HeadersEnded() {
|
||||||
@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
|||||||
mh.HeadersFrame.invalidate()
|
mh.HeadersFrame.invalidate()
|
||||||
|
|
||||||
if err := hdec.Close(); err != nil {
|
if err := hdec.Close(); err != nil {
|
||||||
return nil, ConnectionError(ErrCodeCompression)
|
return mh, ConnectionError(ErrCodeCompression)
|
||||||
}
|
}
|
||||||
if invalid != nil {
|
if invalid != nil {
|
||||||
fr.errDetail = invalid
|
fr.errDetail = invalid
|
||||||
|
19
api/vendor/golang.org/x/net/http2/http2.go
generated
vendored
19
api/vendor/golang.org/x/net/http2/http2.go
generated
vendored
@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2"
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@ -26,6 +27,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"golang.org/x/net/http/httpguts"
|
"golang.org/x/net/http/httpguts"
|
||||||
)
|
)
|
||||||
@ -210,12 +212,6 @@ type stringWriter interface {
|
|||||||
WriteString(s string) (n int, err error)
|
WriteString(s string) (n int, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// A gate lets two goroutines coordinate their activities.
|
|
||||||
type gate chan struct{}
|
|
||||||
|
|
||||||
func (g gate) Done() { g <- struct{}{} }
|
|
||||||
func (g gate) Wait() { <-g }
|
|
||||||
|
|
||||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||||
type closeWaiter chan struct{}
|
type closeWaiter chan struct{}
|
||||||
|
|
||||||
@ -383,3 +379,14 @@ func validPseudoPath(v string) bool {
|
|||||||
// makes that struct also non-comparable, and generally doesn't add
|
// makes that struct also non-comparable, and generally doesn't add
|
||||||
// any size (as long as it's first).
|
// any size (as long as it's first).
|
||||||
type incomparable [0]func()
|
type incomparable [0]func()
|
||||||
|
|
||||||
|
// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
|
||||||
|
// It's defined as an interface here to let us keep synctestGroup entirely test-only
|
||||||
|
// and not a part of non-test builds.
|
||||||
|
type synctestGroupInterface interface {
|
||||||
|
Join()
|
||||||
|
Now() time.Time
|
||||||
|
NewTimer(d time.Duration) timer
|
||||||
|
AfterFunc(d time.Duration, f func()) timer
|
||||||
|
ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
|
||||||
|
}
|
||||||
|
97
api/vendor/golang.org/x/net/http2/server.go
generated
vendored
97
api/vendor/golang.org/x/net/http2/server.go
generated
vendored
@ -154,6 +154,39 @@ type Server struct {
|
|||||||
// so that we don't embed a Mutex in this struct, which will make the
|
// so that we don't embed a Mutex in this struct, which will make the
|
||||||
// struct non-copyable, which might break some callers.
|
// struct non-copyable, which might break some callers.
|
||||||
state *serverInternalState
|
state *serverInternalState
|
||||||
|
|
||||||
|
// Synchronization group used for testing.
|
||||||
|
// Outside of tests, this is nil.
|
||||||
|
group synctestGroupInterface
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) markNewGoroutine() {
|
||||||
|
if s.group != nil {
|
||||||
|
s.group.Join()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) now() time.Time {
|
||||||
|
if s.group != nil {
|
||||||
|
return s.group.Now()
|
||||||
|
}
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTimer creates a new time.Timer, or a synthetic timer in tests.
|
||||||
|
func (s *Server) newTimer(d time.Duration) timer {
|
||||||
|
if s.group != nil {
|
||||||
|
return s.group.NewTimer(d)
|
||||||
|
}
|
||||||
|
return timeTimer{time.NewTimer(d)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
|
||||||
|
func (s *Server) afterFunc(d time.Duration, f func()) timer {
|
||||||
|
if s.group != nil {
|
||||||
|
return s.group.AfterFunc(d, f)
|
||||||
|
}
|
||||||
|
return timeTimer{time.AfterFunc(d, f)}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) initialConnRecvWindowSize() int32 {
|
func (s *Server) initialConnRecvWindowSize() int32 {
|
||||||
@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler {
|
|||||||
//
|
//
|
||||||
// The opts parameter is optional. If nil, default values are used.
|
// The opts parameter is optional. If nil, default values are used.
|
||||||
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||||
|
s.serveConn(c, opts, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
|
||||||
baseCtx, cancel := serverConnBaseContext(c, opts)
|
baseCtx, cancel := serverConnBaseContext(c, opts)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
|||||||
pushEnabled: true,
|
pushEnabled: true,
|
||||||
sawClientPreface: opts.SawClientPreface,
|
sawClientPreface: opts.SawClientPreface,
|
||||||
}
|
}
|
||||||
|
if newf != nil {
|
||||||
|
newf(sc)
|
||||||
|
}
|
||||||
|
|
||||||
s.state.registerConn(sc)
|
s.state.registerConn(sc)
|
||||||
defer s.state.unregisterConn(sc)
|
defer s.state.unregisterConn(sc)
|
||||||
@ -599,8 +639,8 @@ type serverConn struct {
|
|||||||
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
||||||
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
||||||
goAwayCode ErrCode
|
goAwayCode ErrCode
|
||||||
shutdownTimer *time.Timer // nil until used
|
shutdownTimer timer // nil until used
|
||||||
idleTimer *time.Timer // nil if unused
|
idleTimer timer // nil if unused
|
||||||
|
|
||||||
// Owned by the writeFrameAsync goroutine:
|
// Owned by the writeFrameAsync goroutine:
|
||||||
headerWriteBuf bytes.Buffer
|
headerWriteBuf bytes.Buffer
|
||||||
@ -652,8 +692,8 @@ type stream struct {
|
|||||||
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
||||||
gotTrailerHeader bool // HEADER frame for trailers was seen
|
gotTrailerHeader bool // HEADER frame for trailers was seen
|
||||||
wroteHeaders bool // whether we wrote headers (not status 100)
|
wroteHeaders bool // whether we wrote headers (not status 100)
|
||||||
readDeadline *time.Timer // nil if unused
|
readDeadline timer // nil if unused
|
||||||
writeDeadline *time.Timer // nil if unused
|
writeDeadline timer // nil if unused
|
||||||
closeErr error // set before cw is closed
|
closeErr error // set before cw is closed
|
||||||
|
|
||||||
trailer http.Header // accumulated trailers
|
trailer http.Header // accumulated trailers
|
||||||
@ -732,11 +772,7 @@ func isClosedConnError(err error) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: remove this string search and be more like the Windows
|
if errors.Is(err, net.ErrClosed) {
|
||||||
// case below. That might involve modifying the standard library
|
|
||||||
// to return better error types.
|
|
||||||
str := err.Error()
|
|
||||||
if strings.Contains(str, "use of closed network connection") {
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -815,8 +851,9 @@ type readFrameResult struct {
|
|||||||
// consumer is done with the frame.
|
// consumer is done with the frame.
|
||||||
// It's run on its own goroutine.
|
// It's run on its own goroutine.
|
||||||
func (sc *serverConn) readFrames() {
|
func (sc *serverConn) readFrames() {
|
||||||
gate := make(gate)
|
sc.srv.markNewGoroutine()
|
||||||
gateDone := gate.Done
|
gate := make(chan struct{})
|
||||||
|
gateDone := func() { gate <- struct{}{} }
|
||||||
for {
|
for {
|
||||||
f, err := sc.framer.ReadFrame()
|
f, err := sc.framer.ReadFrame()
|
||||||
select {
|
select {
|
||||||
@ -847,6 +884,7 @@ type frameWriteResult struct {
|
|||||||
// At most one goroutine can be running writeFrameAsync at a time per
|
// At most one goroutine can be running writeFrameAsync at a time per
|
||||||
// serverConn.
|
// serverConn.
|
||||||
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
|
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
|
||||||
|
sc.srv.markNewGoroutine()
|
||||||
var err error
|
var err error
|
||||||
if wd == nil {
|
if wd == nil {
|
||||||
err = wr.write.writeFrame(sc)
|
err = wr.write.writeFrame(sc)
|
||||||
@ -926,13 +964,13 @@ func (sc *serverConn) serve() {
|
|||||||
sc.setConnState(http.StateIdle)
|
sc.setConnState(http.StateIdle)
|
||||||
|
|
||||||
if sc.srv.IdleTimeout > 0 {
|
if sc.srv.IdleTimeout > 0 {
|
||||||
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
|
sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
|
||||||
defer sc.idleTimer.Stop()
|
defer sc.idleTimer.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
go sc.readFrames() // closed by defer sc.conn.Close above
|
go sc.readFrames() // closed by defer sc.conn.Close above
|
||||||
|
|
||||||
settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
||||||
defer settingsTimer.Stop()
|
defer settingsTimer.Stop()
|
||||||
|
|
||||||
loopNum := 0
|
loopNum := 0
|
||||||
@ -1061,10 +1099,10 @@ func (sc *serverConn) readPreface() error {
|
|||||||
errc <- nil
|
errc <- nil
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
|
timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
|
||||||
defer timer.Stop()
|
defer timer.Stop()
|
||||||
select {
|
select {
|
||||||
case <-timer.C:
|
case <-timer.C():
|
||||||
return errPrefaceTimeout
|
return errPrefaceTimeout
|
||||||
case err := <-errc:
|
case err := <-errc:
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@ -1429,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) {
|
|||||||
|
|
||||||
func (sc *serverConn) shutDownIn(d time.Duration) {
|
func (sc *serverConn) shutDownIn(d time.Duration) {
|
||||||
sc.serveG.check()
|
sc.serveG.check()
|
||||||
sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
|
sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *serverConn) resetStream(se StreamError) {
|
func (sc *serverConn) resetStream(se StreamError) {
|
||||||
@ -1482,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
|
|||||||
sc.goAway(ErrCodeFlowControl)
|
sc.goAway(ErrCodeFlowControl)
|
||||||
return true
|
return true
|
||||||
case ConnectionError:
|
case ConnectionError:
|
||||||
|
if res.f != nil {
|
||||||
|
if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
|
||||||
|
sc.maxClientStreamID = id
|
||||||
|
}
|
||||||
|
}
|
||||||
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
|
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
|
||||||
sc.goAway(ErrCode(ev))
|
sc.goAway(ErrCode(ev))
|
||||||
return true // goAway will handle shutdown
|
return true // goAway will handle shutdown
|
||||||
@ -1638,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
|||||||
delete(sc.streams, st.id)
|
delete(sc.streams, st.id)
|
||||||
if len(sc.streams) == 0 {
|
if len(sc.streams) == 0 {
|
||||||
sc.setConnState(http.StateIdle)
|
sc.setConnState(http.StateIdle)
|
||||||
if sc.srv.IdleTimeout > 0 {
|
if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
|
||||||
sc.idleTimer.Reset(sc.srv.IdleTimeout)
|
sc.idleTimer.Reset(sc.srv.IdleTimeout)
|
||||||
}
|
}
|
||||||
if h1ServerKeepAlivesDisabled(sc.hs) {
|
if h1ServerKeepAlivesDisabled(sc.hs) {
|
||||||
@ -1660,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
st.closeErr = err
|
st.closeErr = err
|
||||||
|
st.cancelCtx()
|
||||||
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
|
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
|
||||||
sc.writeSched.CloseStream(st.id)
|
sc.writeSched.CloseStream(st.id)
|
||||||
}
|
}
|
||||||
@ -2020,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||||||
// (in Go 1.8), though. That's a more sane option anyway.
|
// (in Go 1.8), though. That's a more sane option anyway.
|
||||||
if sc.hs.ReadTimeout > 0 {
|
if sc.hs.ReadTimeout > 0 {
|
||||||
sc.conn.SetReadDeadline(time.Time{})
|
sc.conn.SetReadDeadline(time.Time{})
|
||||||
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
return sc.scheduleHandler(id, rw, req, handler)
|
return sc.scheduleHandler(id, rw, req, handler)
|
||||||
@ -2118,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
|||||||
st.flow.add(sc.initialStreamSendWindowSize)
|
st.flow.add(sc.initialStreamSendWindowSize)
|
||||||
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
|
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
|
||||||
if sc.hs.WriteTimeout > 0 {
|
if sc.hs.WriteTimeout > 0 {
|
||||||
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
sc.streams[id] = st
|
sc.streams[id] = st
|
||||||
@ -2342,6 +2386,7 @@ func (sc *serverConn) handlerDone() {
|
|||||||
|
|
||||||
// Run on its own goroutine.
|
// Run on its own goroutine.
|
||||||
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
|
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
|
||||||
|
sc.srv.markNewGoroutine()
|
||||||
defer sc.sendServeMsg(handlerDoneMsg)
|
defer sc.sendServeMsg(handlerDoneMsg)
|
||||||
didPanic := true
|
didPanic := true
|
||||||
defer func() {
|
defer func() {
|
||||||
@ -2638,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
|||||||
var date string
|
var date string
|
||||||
if _, ok := rws.snapHeader["Date"]; !ok {
|
if _, ok := rws.snapHeader["Date"]; !ok {
|
||||||
// TODO(bradfitz): be faster here, like net/http? measure.
|
// TODO(bradfitz): be faster here, like net/http? measure.
|
||||||
date = time.Now().UTC().Format(http.TimeFormat)
|
date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, v := range rws.snapHeader["Trailer"] {
|
for _, v := range rws.snapHeader["Trailer"] {
|
||||||
@ -2760,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
|
|||||||
|
|
||||||
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
||||||
st := w.rws.stream
|
st := w.rws.stream
|
||||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
|
||||||
// If we're setting a deadline in the past, reset the stream immediately
|
// If we're setting a deadline in the past, reset the stream immediately
|
||||||
// so writes after SetWriteDeadline returns will fail.
|
// so writes after SetWriteDeadline returns will fail.
|
||||||
st.onReadTimeout()
|
st.onReadTimeout()
|
||||||
@ -2776,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
|||||||
if deadline.IsZero() {
|
if deadline.IsZero() {
|
||||||
st.readDeadline = nil
|
st.readDeadline = nil
|
||||||
} else if st.readDeadline == nil {
|
} else if st.readDeadline == nil {
|
||||||
st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
|
st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
|
||||||
} else {
|
} else {
|
||||||
st.readDeadline.Reset(deadline.Sub(time.Now()))
|
st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
return nil
|
return nil
|
||||||
@ -2786,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
|||||||
|
|
||||||
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
|
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
|
||||||
st := w.rws.stream
|
st := w.rws.stream
|
||||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
|
||||||
// If we're setting a deadline in the past, reset the stream immediately
|
// If we're setting a deadline in the past, reset the stream immediately
|
||||||
// so writes after SetWriteDeadline returns will fail.
|
// so writes after SetWriteDeadline returns will fail.
|
||||||
st.onWriteTimeout()
|
st.onWriteTimeout()
|
||||||
@ -2802,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
|
|||||||
if deadline.IsZero() {
|
if deadline.IsZero() {
|
||||||
st.writeDeadline = nil
|
st.writeDeadline = nil
|
||||||
} else if st.writeDeadline == nil {
|
} else if st.writeDeadline == nil {
|
||||||
st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
|
st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
|
||||||
} else {
|
} else {
|
||||||
st.writeDeadline.Reset(deadline.Sub(time.Now()))
|
st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
return nil
|
return nil
|
||||||
|
331
api/vendor/golang.org/x/net/http2/testsync.go
generated
vendored
331
api/vendor/golang.org/x/net/http2/testsync.go
generated
vendored
@ -1,331 +0,0 @@
|
|||||||
// Copyright 2024 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// testSyncHooks coordinates goroutines in tests.
|
|
||||||
//
|
|
||||||
// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
|
|
||||||
// - the goroutine running RoundTrip;
|
|
||||||
// - the clientStream.doRequest goroutine, which writes the request; and
|
|
||||||
// - the clientStream.readLoop goroutine, which reads the response.
|
|
||||||
//
|
|
||||||
// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
|
|
||||||
// are blocked waiting for some condition such as reading the Request.Body or waiting for
|
|
||||||
// flow control to become available.
|
|
||||||
//
|
|
||||||
// The testSyncHooks also manage timers and synthetic time in tests.
|
|
||||||
// This permits us to, for example, start a request and cause it to time out waiting for
|
|
||||||
// response headers without resorting to time.Sleep calls.
|
|
||||||
type testSyncHooks struct {
|
|
||||||
// active/inactive act as a mutex and condition variable.
|
|
||||||
//
|
|
||||||
// - neither chan contains a value: testSyncHooks is locked.
|
|
||||||
// - active contains a value: unlocked, and at least one goroutine is not blocked
|
|
||||||
// - inactive contains a value: unlocked, and all goroutines are blocked
|
|
||||||
active chan struct{}
|
|
||||||
inactive chan struct{}
|
|
||||||
|
|
||||||
// goroutine counts
|
|
||||||
total int // total goroutines
|
|
||||||
condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
|
|
||||||
blocked []*testBlockedGoroutine // otherwise blocked
|
|
||||||
|
|
||||||
// fake time
|
|
||||||
now time.Time
|
|
||||||
timers []*fakeTimer
|
|
||||||
|
|
||||||
// Transport testing: Report various events.
|
|
||||||
newclientconn func(*ClientConn)
|
|
||||||
newstream func(*clientStream)
|
|
||||||
}
|
|
||||||
|
|
||||||
// testBlockedGoroutine is a blocked goroutine.
|
|
||||||
type testBlockedGoroutine struct {
|
|
||||||
f func() bool // blocked until f returns true
|
|
||||||
ch chan struct{} // closed when unblocked
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTestSyncHooks() *testSyncHooks {
|
|
||||||
h := &testSyncHooks{
|
|
||||||
active: make(chan struct{}, 1),
|
|
||||||
inactive: make(chan struct{}, 1),
|
|
||||||
condwait: map[*sync.Cond]int{},
|
|
||||||
}
|
|
||||||
h.inactive <- struct{}{}
|
|
||||||
h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// lock acquires the testSyncHooks mutex.
|
|
||||||
func (h *testSyncHooks) lock() {
|
|
||||||
select {
|
|
||||||
case <-h.active:
|
|
||||||
case <-h.inactive:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// waitInactive waits for all goroutines to become inactive.
|
|
||||||
func (h *testSyncHooks) waitInactive() {
|
|
||||||
for {
|
|
||||||
<-h.inactive
|
|
||||||
if !h.unlock() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// unlock releases the testSyncHooks mutex.
|
|
||||||
// It reports whether any goroutines are active.
|
|
||||||
func (h *testSyncHooks) unlock() (active bool) {
|
|
||||||
// Look for a blocked goroutine which can be unblocked.
|
|
||||||
blocked := h.blocked[:0]
|
|
||||||
unblocked := false
|
|
||||||
for _, b := range h.blocked {
|
|
||||||
if !unblocked && b.f() {
|
|
||||||
unblocked = true
|
|
||||||
close(b.ch)
|
|
||||||
} else {
|
|
||||||
blocked = append(blocked, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
h.blocked = blocked
|
|
||||||
|
|
||||||
// Count goroutines blocked on condition variables.
|
|
||||||
condwait := 0
|
|
||||||
for _, count := range h.condwait {
|
|
||||||
condwait += count
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.total > condwait+len(blocked) {
|
|
||||||
h.active <- struct{}{}
|
|
||||||
return true
|
|
||||||
} else {
|
|
||||||
h.inactive <- struct{}{}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// goRun starts a new goroutine.
|
|
||||||
func (h *testSyncHooks) goRun(f func()) {
|
|
||||||
h.lock()
|
|
||||||
h.total++
|
|
||||||
h.unlock()
|
|
||||||
go func() {
|
|
||||||
defer func() {
|
|
||||||
h.lock()
|
|
||||||
h.total--
|
|
||||||
h.unlock()
|
|
||||||
}()
|
|
||||||
f()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
|
|
||||||
// It waits until f returns true before proceeding.
|
|
||||||
//
|
|
||||||
// Example usage:
|
|
||||||
//
|
|
||||||
// h.blockUntil(func() bool {
|
|
||||||
// // Is the context done yet?
|
|
||||||
// select {
|
|
||||||
// case <-ctx.Done():
|
|
||||||
// default:
|
|
||||||
// return false
|
|
||||||
// }
|
|
||||||
// return true
|
|
||||||
// })
|
|
||||||
// // Wait for the context to become done.
|
|
||||||
// <-ctx.Done()
|
|
||||||
//
|
|
||||||
// The function f passed to blockUntil must be non-blocking and idempotent.
|
|
||||||
func (h *testSyncHooks) blockUntil(f func() bool) {
|
|
||||||
if f() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ch := make(chan struct{})
|
|
||||||
h.lock()
|
|
||||||
h.blocked = append(h.blocked, &testBlockedGoroutine{
|
|
||||||
f: f,
|
|
||||||
ch: ch,
|
|
||||||
})
|
|
||||||
h.unlock()
|
|
||||||
<-ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// broadcast is sync.Cond.Broadcast.
|
|
||||||
func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
|
|
||||||
h.lock()
|
|
||||||
delete(h.condwait, cond)
|
|
||||||
h.unlock()
|
|
||||||
cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
// broadcast is sync.Cond.Wait.
|
|
||||||
func (h *testSyncHooks) condWait(cond *sync.Cond) {
|
|
||||||
h.lock()
|
|
||||||
h.condwait[cond]++
|
|
||||||
h.unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// newTimer creates a new fake timer.
|
|
||||||
func (h *testSyncHooks) newTimer(d time.Duration) timer {
|
|
||||||
h.lock()
|
|
||||||
defer h.unlock()
|
|
||||||
t := &fakeTimer{
|
|
||||||
hooks: h,
|
|
||||||
when: h.now.Add(d),
|
|
||||||
c: make(chan time.Time),
|
|
||||||
}
|
|
||||||
h.timers = append(h.timers, t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// afterFunc creates a new fake AfterFunc timer.
|
|
||||||
func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
|
|
||||||
h.lock()
|
|
||||||
defer h.unlock()
|
|
||||||
t := &fakeTimer{
|
|
||||||
hooks: h,
|
|
||||||
when: h.now.Add(d),
|
|
||||||
f: f,
|
|
||||||
}
|
|
||||||
h.timers = append(h.timers, t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
t := h.afterFunc(d, cancel)
|
|
||||||
return ctx, func() {
|
|
||||||
t.Stop()
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *testSyncHooks) timeUntilEvent() time.Duration {
|
|
||||||
h.lock()
|
|
||||||
defer h.unlock()
|
|
||||||
var next time.Time
|
|
||||||
for _, t := range h.timers {
|
|
||||||
if next.IsZero() || t.when.Before(next) {
|
|
||||||
next = t.when
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if d := next.Sub(h.now); d > 0 {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// advance advances time and causes synthetic timers to fire.
|
|
||||||
func (h *testSyncHooks) advance(d time.Duration) {
|
|
||||||
h.lock()
|
|
||||||
defer h.unlock()
|
|
||||||
h.now = h.now.Add(d)
|
|
||||||
timers := h.timers[:0]
|
|
||||||
for _, t := range h.timers {
|
|
||||||
t := t // remove after go.mod depends on go1.22
|
|
||||||
t.mu.Lock()
|
|
||||||
switch {
|
|
||||||
case t.when.After(h.now):
|
|
||||||
timers = append(timers, t)
|
|
||||||
case t.when.IsZero():
|
|
||||||
// stopped timer
|
|
||||||
default:
|
|
||||||
t.when = time.Time{}
|
|
||||||
if t.c != nil {
|
|
||||||
close(t.c)
|
|
||||||
}
|
|
||||||
if t.f != nil {
|
|
||||||
h.total++
|
|
||||||
go func() {
|
|
||||||
defer func() {
|
|
||||||
h.lock()
|
|
||||||
h.total--
|
|
||||||
h.unlock()
|
|
||||||
}()
|
|
||||||
t.f()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.mu.Unlock()
|
|
||||||
}
|
|
||||||
h.timers = timers
|
|
||||||
}
|
|
||||||
|
|
||||||
// A timer wraps a time.Timer, or a synthetic equivalent in tests.
|
|
||||||
// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
|
|
||||||
type timer interface {
|
|
||||||
C() <-chan time.Time
|
|
||||||
Stop() bool
|
|
||||||
Reset(d time.Duration) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// timeTimer implements timer using real time.
|
|
||||||
type timeTimer struct {
|
|
||||||
t *time.Timer
|
|
||||||
c chan time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// newTimeTimer creates a new timer using real time.
|
|
||||||
func newTimeTimer(d time.Duration) timer {
|
|
||||||
ch := make(chan time.Time)
|
|
||||||
t := time.AfterFunc(d, func() {
|
|
||||||
close(ch)
|
|
||||||
})
|
|
||||||
return &timeTimer{t, ch}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newTimeAfterFunc creates an AfterFunc timer using real time.
|
|
||||||
func newTimeAfterFunc(d time.Duration, f func()) timer {
|
|
||||||
return &timeTimer{
|
|
||||||
t: time.AfterFunc(d, f),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t timeTimer) C() <-chan time.Time { return t.c }
|
|
||||||
func (t timeTimer) Stop() bool { return t.t.Stop() }
|
|
||||||
func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
|
|
||||||
|
|
||||||
// fakeTimer implements timer using fake time.
|
|
||||||
type fakeTimer struct {
|
|
||||||
hooks *testSyncHooks
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
when time.Time // when the timer will fire
|
|
||||||
c chan time.Time // closed when the timer fires; mutually exclusive with f
|
|
||||||
f func() // called when the timer fires; mutually exclusive with c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *fakeTimer) C() <-chan time.Time { return t.c }
|
|
||||||
|
|
||||||
func (t *fakeTimer) Stop() bool {
|
|
||||||
t.mu.Lock()
|
|
||||||
defer t.mu.Unlock()
|
|
||||||
stopped := t.when.IsZero()
|
|
||||||
t.when = time.Time{}
|
|
||||||
return stopped
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *fakeTimer) Reset(d time.Duration) bool {
|
|
||||||
if t.c != nil || t.f == nil {
|
|
||||||
panic("fakeTimer only supports Reset on AfterFunc timers")
|
|
||||||
}
|
|
||||||
t.mu.Lock()
|
|
||||||
defer t.mu.Unlock()
|
|
||||||
t.hooks.lock()
|
|
||||||
defer t.hooks.unlock()
|
|
||||||
active := !t.when.IsZero()
|
|
||||||
t.when = t.hooks.now.Add(d)
|
|
||||||
if !active {
|
|
||||||
t.hooks.timers = append(t.hooks.timers, t)
|
|
||||||
}
|
|
||||||
return active
|
|
||||||
}
|
|
20
api/vendor/golang.org/x/net/http2/timer.go
generated
vendored
Normal file
20
api/vendor/golang.org/x/net/http2/timer.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
// Copyright 2024 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// A timer is a time.Timer, as an interface which can be replaced in tests.
|
||||||
|
type timer = interface {
|
||||||
|
C() <-chan time.Time
|
||||||
|
Reset(d time.Duration) bool
|
||||||
|
Stop() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeTimer adapts a time.Timer to the timer interface.
|
||||||
|
type timeTimer struct {
|
||||||
|
*time.Timer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
|
325
api/vendor/golang.org/x/net/http2/transport.go
generated
vendored
325
api/vendor/golang.org/x/net/http2/transport.go
generated
vendored
@ -185,7 +185,45 @@ type Transport struct {
|
|||||||
connPoolOnce sync.Once
|
connPoolOnce sync.Once
|
||||||
connPoolOrDef ClientConnPool // non-nil version of ConnPool
|
connPoolOrDef ClientConnPool // non-nil version of ConnPool
|
||||||
|
|
||||||
syncHooks *testSyncHooks
|
*transportTestHooks
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hook points used for testing.
|
||||||
|
// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations.
|
||||||
|
// Inside tests, see the testSyncHooks function docs.
|
||||||
|
|
||||||
|
type transportTestHooks struct {
|
||||||
|
newclientconn func(*ClientConn)
|
||||||
|
group synctestGroupInterface
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Transport) markNewGoroutine() {
|
||||||
|
if t != nil && t.transportTestHooks != nil {
|
||||||
|
t.transportTestHooks.group.Join()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTimer creates a new time.Timer, or a synthetic timer in tests.
|
||||||
|
func (t *Transport) newTimer(d time.Duration) timer {
|
||||||
|
if t.transportTestHooks != nil {
|
||||||
|
return t.transportTestHooks.group.NewTimer(d)
|
||||||
|
}
|
||||||
|
return timeTimer{time.NewTimer(d)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
|
||||||
|
func (t *Transport) afterFunc(d time.Duration, f func()) timer {
|
||||||
|
if t.transportTestHooks != nil {
|
||||||
|
return t.transportTestHooks.group.AfterFunc(d, f)
|
||||||
|
}
|
||||||
|
return timeTimer{time.AfterFunc(d, f)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
||||||
|
if t.transportTestHooks != nil {
|
||||||
|
return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
|
||||||
|
}
|
||||||
|
return context.WithTimeout(ctx, d)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) maxHeaderListSize() uint32 {
|
func (t *Transport) maxHeaderListSize() uint32 {
|
||||||
@ -352,60 +390,6 @@ type ClientConn struct {
|
|||||||
werr error // first write error that has occurred
|
werr error // first write error that has occurred
|
||||||
hbuf bytes.Buffer // HPACK encoder writes into this
|
hbuf bytes.Buffer // HPACK encoder writes into this
|
||||||
henc *hpack.Encoder
|
henc *hpack.Encoder
|
||||||
|
|
||||||
syncHooks *testSyncHooks // can be nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hook points used for testing.
|
|
||||||
// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
|
|
||||||
// Inside tests, see the testSyncHooks function docs.
|
|
||||||
|
|
||||||
// goRun starts a new goroutine.
|
|
||||||
func (cc *ClientConn) goRun(f func()) {
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.goRun(f)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
go f()
|
|
||||||
}
|
|
||||||
|
|
||||||
// condBroadcast is cc.cond.Broadcast.
|
|
||||||
func (cc *ClientConn) condBroadcast() {
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.condBroadcast(cc.cond)
|
|
||||||
}
|
|
||||||
cc.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
// condWait is cc.cond.Wait.
|
|
||||||
func (cc *ClientConn) condWait() {
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.condWait(cc.cond)
|
|
||||||
}
|
|
||||||
cc.cond.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// newTimer creates a new time.Timer, or a synthetic timer in tests.
|
|
||||||
func (cc *ClientConn) newTimer(d time.Duration) timer {
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
return cc.syncHooks.newTimer(d)
|
|
||||||
}
|
|
||||||
return newTimeTimer(d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
|
|
||||||
func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
return cc.syncHooks.afterFunc(d, f)
|
|
||||||
}
|
|
||||||
return newTimeAfterFunc(d, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
return cc.syncHooks.contextWithTimeout(ctx, d)
|
|
||||||
}
|
|
||||||
return context.WithTimeout(ctx, d)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// clientStream is the state for a single HTTP/2 stream. One of these
|
// clientStream is the state for a single HTTP/2 stream. One of these
|
||||||
@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
|
|||||||
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
|
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
|
||||||
if cs.cc.cond != nil {
|
if cs.cc.cond != nil {
|
||||||
// Wake up writeRequestBody if it is waiting on flow control.
|
// Wake up writeRequestBody if it is waiting on flow control.
|
||||||
cs.cc.condBroadcast()
|
cs.cc.cond.Broadcast()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
|
|||||||
defer cc.mu.Unlock()
|
defer cc.mu.Unlock()
|
||||||
if cs.reqBody != nil && cs.reqBodyClosed == nil {
|
if cs.reqBody != nil && cs.reqBodyClosed == nil {
|
||||||
cs.closeReqBodyLocked()
|
cs.closeReqBodyLocked()
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() {
|
|||||||
}
|
}
|
||||||
cs.reqBodyClosed = make(chan struct{})
|
cs.reqBodyClosed = make(chan struct{})
|
||||||
reqBodyClosed := cs.reqBodyClosed
|
reqBodyClosed := cs.reqBodyClosed
|
||||||
cs.cc.goRun(func() {
|
go func() {
|
||||||
|
cs.cc.t.markNewGoroutine()
|
||||||
cs.reqBody.Close()
|
cs.reqBody.Close()
|
||||||
close(reqBodyClosed)
|
close(reqBodyClosed)
|
||||||
})
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
type stickyErrWriter struct {
|
type stickyErrWriter struct {
|
||||||
@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
|
|||||||
backoff := float64(uint(1) << (uint(retry) - 1))
|
backoff := float64(uint(1) << (uint(retry) - 1))
|
||||||
backoff += backoff * (0.1 * mathrand.Float64())
|
backoff += backoff * (0.1 * mathrand.Float64())
|
||||||
d := time.Second * time.Duration(backoff)
|
d := time.Second * time.Duration(backoff)
|
||||||
var tm timer
|
tm := t.newTimer(d)
|
||||||
if t.syncHooks != nil {
|
|
||||||
tm = t.syncHooks.newTimer(d)
|
|
||||||
t.syncHooks.blockUntil(func() bool {
|
|
||||||
select {
|
|
||||||
case <-tm.C():
|
|
||||||
case <-req.Context().Done():
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
tm = newTimeTimer(d)
|
|
||||||
}
|
|
||||||
select {
|
select {
|
||||||
case <-tm.C():
|
case <-tm.C():
|
||||||
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
|
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
|
||||||
@ -725,8 +696,8 @@ func canRetryError(err error) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
|
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
|
||||||
if t.syncHooks != nil {
|
if t.transportTestHooks != nil {
|
||||||
return t.newClientConn(nil, singleUse, t.syncHooks)
|
return t.newClientConn(nil, singleUse)
|
||||||
}
|
}
|
||||||
host, _, err := net.SplitHostPort(addr)
|
host, _, err := net.SplitHostPort(addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return t.newClientConn(tconn, singleUse, nil)
|
return t.newClientConn(tconn, singleUse)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) newTLSConfig(host string) *tls.Config {
|
func (t *Transport) newTLSConfig(host string) *tls.Config {
|
||||||
@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||||
return t.newClientConn(c, t.disableKeepAlives(), nil)
|
return t.newClientConn(c, t.disableKeepAlives())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
|
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
|
||||||
cc := &ClientConn{
|
cc := &ClientConn{
|
||||||
t: t,
|
t: t,
|
||||||
tconn: c,
|
tconn: c,
|
||||||
@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
|
|||||||
wantSettingsAck: true,
|
wantSettingsAck: true,
|
||||||
pings: make(map[[8]byte]chan struct{}),
|
pings: make(map[[8]byte]chan struct{}),
|
||||||
reqHeaderMu: make(chan struct{}, 1),
|
reqHeaderMu: make(chan struct{}, 1),
|
||||||
syncHooks: hooks,
|
|
||||||
}
|
}
|
||||||
if hooks != nil {
|
if t.transportTestHooks != nil {
|
||||||
hooks.newclientconn(cc)
|
t.markNewGoroutine()
|
||||||
|
t.transportTestHooks.newclientconn(cc)
|
||||||
c = cc.tconn
|
c = cc.tconn
|
||||||
}
|
}
|
||||||
if d := t.idleConnTimeout(); d != 0 {
|
|
||||||
cc.idleTimeout = d
|
|
||||||
cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
|
|
||||||
}
|
|
||||||
if VerboseLogs {
|
if VerboseLogs {
|
||||||
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
|
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
|
||||||
}
|
}
|
||||||
@ -893,7 +860,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
|
|||||||
return nil, cc.werr
|
return nil, cc.werr
|
||||||
}
|
}
|
||||||
|
|
||||||
cc.goRun(cc.readLoop)
|
// Start the idle timer after the connection is fully initialized.
|
||||||
|
if d := t.idleConnTimeout(); d != 0 {
|
||||||
|
cc.idleTimeout = d
|
||||||
|
cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
go cc.readLoop()
|
||||||
return cc, nil
|
return cc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -901,7 +874,7 @@ func (cc *ClientConn) healthCheck() {
|
|||||||
pingTimeout := cc.t.pingTimeout()
|
pingTimeout := cc.t.pingTimeout()
|
||||||
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
|
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
|
||||||
// trigger the healthCheck again if there is no frame received.
|
// trigger the healthCheck again if there is no frame received.
|
||||||
ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
|
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
cc.vlogf("http2: Transport sending health check")
|
cc.vlogf("http2: Transport sending health check")
|
||||||
err := cc.Ping(ctx)
|
err := cc.Ping(ctx)
|
||||||
@ -936,7 +909,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
|
|||||||
}
|
}
|
||||||
last := f.LastStreamID
|
last := f.LastStreamID
|
||||||
for streamID, cs := range cc.streams {
|
for streamID, cs := range cc.streams {
|
||||||
if streamID > last {
|
if streamID <= last {
|
||||||
|
// The server's GOAWAY indicates that it received this stream.
|
||||||
|
// It will either finish processing it, or close the connection
|
||||||
|
// without doing so. Either way, leave the stream alone for now.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
|
||||||
|
// Don't retry the first stream on a connection if we get a non-NO error.
|
||||||
|
// If the server is sending an error on a new connection,
|
||||||
|
// retrying the request on a new one probably isn't going to work.
|
||||||
|
cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
|
||||||
|
} else {
|
||||||
|
// Aborting the stream with errClentConnGotGoAway indicates that
|
||||||
|
// the request should be retried on a new connection.
|
||||||
cs.abortStreamLocked(errClientConnGotGoAway)
|
cs.abortStreamLocked(errClientConnGotGoAway)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1131,7 +1117,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
|
|||||||
// Wait for all in-flight streams to complete or connection to close
|
// Wait for all in-flight streams to complete or connection to close
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
cancelled := false // guarded by cc.mu
|
cancelled := false // guarded by cc.mu
|
||||||
cc.goRun(func() {
|
go func() {
|
||||||
|
cc.t.markNewGoroutine()
|
||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
defer cc.mu.Unlock()
|
defer cc.mu.Unlock()
|
||||||
for {
|
for {
|
||||||
@ -1143,9 +1130,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
|
|||||||
if cancelled {
|
if cancelled {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
cc.condWait()
|
cc.cond.Wait()
|
||||||
}
|
}
|
||||||
})
|
}()
|
||||||
shutdownEnterWaitStateHook()
|
shutdownEnterWaitStateHook()
|
||||||
select {
|
select {
|
||||||
case <-done:
|
case <-done:
|
||||||
@ -1155,7 +1142,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
|
|||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
// Free the goroutine above
|
// Free the goroutine above
|
||||||
cancelled = true
|
cancelled = true
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
@ -1193,7 +1180,7 @@ func (cc *ClientConn) closeForError(err error) {
|
|||||||
for _, cs := range cc.streams {
|
for _, cs := range cc.streams {
|
||||||
cs.abortStreamLocked(err)
|
cs.abortStreamLocked(err)
|
||||||
}
|
}
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
cc.closeConn()
|
cc.closeConn()
|
||||||
}
|
}
|
||||||
@ -1308,23 +1295,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
|||||||
respHeaderRecv: make(chan struct{}),
|
respHeaderRecv: make(chan struct{}),
|
||||||
donec: make(chan struct{}),
|
donec: make(chan struct{}),
|
||||||
}
|
}
|
||||||
cc.goRun(func() {
|
|
||||||
cs.doRequest(req)
|
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||||
})
|
if !cc.t.disableCompression() &&
|
||||||
|
req.Header.Get("Accept-Encoding") == "" &&
|
||||||
|
req.Header.Get("Range") == "" &&
|
||||||
|
!cs.isHead {
|
||||||
|
// Request gzip only, not deflate. Deflate is ambiguous and
|
||||||
|
// not as universally supported anyway.
|
||||||
|
// See: https://zlib.net/zlib_faq.html#faq39
|
||||||
|
//
|
||||||
|
// Note that we don't request this for HEAD requests,
|
||||||
|
// due to a bug in nginx:
|
||||||
|
// http://trac.nginx.org/nginx/ticket/358
|
||||||
|
// https://golang.org/issue/5522
|
||||||
|
//
|
||||||
|
// We don't request gzip if the request is for a range, since
|
||||||
|
// auto-decoding a portion of a gzipped document will just fail
|
||||||
|
// anyway. See https://golang.org/issue/8923
|
||||||
|
cs.requestedGzip = true
|
||||||
|
}
|
||||||
|
|
||||||
|
go cs.doRequest(req, streamf)
|
||||||
|
|
||||||
waitDone := func() error {
|
waitDone := func() error {
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.blockUntil(func() bool {
|
|
||||||
select {
|
|
||||||
case <-cs.donec:
|
|
||||||
case <-ctx.Done():
|
|
||||||
case <-cs.reqCancel:
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
select {
|
select {
|
||||||
case <-cs.donec:
|
case <-cs.donec:
|
||||||
return nil
|
return nil
|
||||||
@ -1385,24 +1379,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if streamf != nil {
|
|
||||||
streamf(cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
for {
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.blockUntil(func() bool {
|
|
||||||
select {
|
|
||||||
case <-cs.respHeaderRecv:
|
|
||||||
case <-cs.abort:
|
|
||||||
case <-ctx.Done():
|
|
||||||
case <-cs.reqCancel:
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
select {
|
select {
|
||||||
case <-cs.respHeaderRecv:
|
case <-cs.respHeaderRecv:
|
||||||
return handleResponseHeaders()
|
return handleResponseHeaders()
|
||||||
@ -1432,8 +1409,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
|||||||
// doRequest runs for the duration of the request lifetime.
|
// doRequest runs for the duration of the request lifetime.
|
||||||
//
|
//
|
||||||
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
|
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
|
||||||
func (cs *clientStream) doRequest(req *http.Request) {
|
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
|
||||||
err := cs.writeRequest(req)
|
cs.cc.t.markNewGoroutine()
|
||||||
|
err := cs.writeRequest(req, streamf)
|
||||||
cs.cleanupWriteRequest(err)
|
cs.cleanupWriteRequest(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1444,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) {
|
|||||||
//
|
//
|
||||||
// It returns non-nil if the request ends otherwise.
|
// It returns non-nil if the request ends otherwise.
|
||||||
// If the returned error is StreamError, the error Code may be used in resetting the stream.
|
// If the returned error is StreamError, the error Code may be used in resetting the stream.
|
||||||
func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) {
|
||||||
cc := cs.cc
|
cc := cs.cc
|
||||||
ctx := cs.ctx
|
ctx := cs.ctx
|
||||||
|
|
||||||
@ -1458,21 +1436,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
|||||||
if cc.reqHeaderMu == nil {
|
if cc.reqHeaderMu == nil {
|
||||||
panic("RoundTrip on uninitialized ClientConn") // for tests
|
panic("RoundTrip on uninitialized ClientConn") // for tests
|
||||||
}
|
}
|
||||||
var newStreamHook func(*clientStream)
|
|
||||||
if cc.syncHooks != nil {
|
|
||||||
newStreamHook = cc.syncHooks.newstream
|
|
||||||
cc.syncHooks.blockUntil(func() bool {
|
|
||||||
select {
|
|
||||||
case cc.reqHeaderMu <- struct{}{}:
|
|
||||||
<-cc.reqHeaderMu
|
|
||||||
case <-cs.reqCancel:
|
|
||||||
case <-ctx.Done():
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
select {
|
select {
|
||||||
case cc.reqHeaderMu <- struct{}{}:
|
case cc.reqHeaderMu <- struct{}{}:
|
||||||
case <-cs.reqCancel:
|
case <-cs.reqCancel:
|
||||||
@ -1497,28 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
|||||||
}
|
}
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
|
|
||||||
if newStreamHook != nil {
|
if streamf != nil {
|
||||||
newStreamHook(cs)
|
streamf(cs)
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
|
||||||
if !cc.t.disableCompression() &&
|
|
||||||
req.Header.Get("Accept-Encoding") == "" &&
|
|
||||||
req.Header.Get("Range") == "" &&
|
|
||||||
!cs.isHead {
|
|
||||||
// Request gzip only, not deflate. Deflate is ambiguous and
|
|
||||||
// not as universally supported anyway.
|
|
||||||
// See: https://zlib.net/zlib_faq.html#faq39
|
|
||||||
//
|
|
||||||
// Note that we don't request this for HEAD requests,
|
|
||||||
// due to a bug in nginx:
|
|
||||||
// http://trac.nginx.org/nginx/ticket/358
|
|
||||||
// https://golang.org/issue/5522
|
|
||||||
//
|
|
||||||
// We don't request gzip if the request is for a range, since
|
|
||||||
// auto-decoding a portion of a gzipped document will just fail
|
|
||||||
// anyway. See https://golang.org/issue/8923
|
|
||||||
cs.requestedGzip = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
continueTimeout := cc.t.expectContinueTimeout()
|
continueTimeout := cc.t.expectContinueTimeout()
|
||||||
@ -1581,7 +1524,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
|||||||
var respHeaderTimer <-chan time.Time
|
var respHeaderTimer <-chan time.Time
|
||||||
var respHeaderRecv chan struct{}
|
var respHeaderRecv chan struct{}
|
||||||
if d := cc.responseHeaderTimeout(); d != 0 {
|
if d := cc.responseHeaderTimeout(); d != 0 {
|
||||||
timer := cc.newTimer(d)
|
timer := cc.t.newTimer(d)
|
||||||
defer timer.Stop()
|
defer timer.Stop()
|
||||||
respHeaderTimer = timer.C()
|
respHeaderTimer = timer.C()
|
||||||
respHeaderRecv = cs.respHeaderRecv
|
respHeaderRecv = cs.respHeaderRecv
|
||||||
@ -1590,21 +1533,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
|||||||
// or until the request is aborted (via context, error, or otherwise),
|
// or until the request is aborted (via context, error, or otherwise),
|
||||||
// whichever comes first.
|
// whichever comes first.
|
||||||
for {
|
for {
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.blockUntil(func() bool {
|
|
||||||
select {
|
|
||||||
case <-cs.peerClosed:
|
|
||||||
case <-respHeaderTimer:
|
|
||||||
case <-respHeaderRecv:
|
|
||||||
case <-cs.abort:
|
|
||||||
case <-ctx.Done():
|
|
||||||
case <-cs.reqCancel:
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
select {
|
select {
|
||||||
case <-cs.peerClosed:
|
case <-cs.peerClosed:
|
||||||
return nil
|
return nil
|
||||||
@ -1753,7 +1681,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cc.pendingRequests++
|
cc.pendingRequests++
|
||||||
cc.condWait()
|
cc.cond.Wait()
|
||||||
cc.pendingRequests--
|
cc.pendingRequests--
|
||||||
select {
|
select {
|
||||||
case <-cs.abort:
|
case <-cs.abort:
|
||||||
@ -2015,7 +1943,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
|
|||||||
cs.flow.take(take)
|
cs.flow.take(take)
|
||||||
return take, nil
|
return take, nil
|
||||||
}
|
}
|
||||||
cc.condWait()
|
cc.cond.Wait()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2298,7 +2226,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
|
|||||||
}
|
}
|
||||||
// Wake up writeRequestBody via clientStream.awaitFlowControl and
|
// Wake up writeRequestBody via clientStream.awaitFlowControl and
|
||||||
// wake up RoundTrip if there is a pending request.
|
// wake up RoundTrip if there is a pending request.
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
|
|
||||||
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
|
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
|
||||||
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
|
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
|
||||||
@ -2320,6 +2248,7 @@ type clientConnReadLoop struct {
|
|||||||
|
|
||||||
// readLoop runs in its own goroutine and reads and dispatches frames.
|
// readLoop runs in its own goroutine and reads and dispatches frames.
|
||||||
func (cc *ClientConn) readLoop() {
|
func (cc *ClientConn) readLoop() {
|
||||||
|
cc.t.markNewGoroutine()
|
||||||
rl := &clientConnReadLoop{cc: cc}
|
rl := &clientConnReadLoop{cc: cc}
|
||||||
defer rl.cleanup()
|
defer rl.cleanup()
|
||||||
cc.readerErr = rl.run()
|
cc.readerErr = rl.run()
|
||||||
@ -2386,7 +2315,7 @@ func (rl *clientConnReadLoop) cleanup() {
|
|||||||
cs.abortStreamLocked(err)
|
cs.abortStreamLocked(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2423,7 +2352,7 @@ func (rl *clientConnReadLoop) run() error {
|
|||||||
readIdleTimeout := cc.t.ReadIdleTimeout
|
readIdleTimeout := cc.t.ReadIdleTimeout
|
||||||
var t timer
|
var t timer
|
||||||
if readIdleTimeout != 0 {
|
if readIdleTimeout != 0 {
|
||||||
t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
|
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
f, err := cc.fr.ReadFrame()
|
f, err := cc.fr.ReadFrame()
|
||||||
@ -3021,7 +2950,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
|
|||||||
for _, cs := range cc.streams {
|
for _, cs := range cc.streams {
|
||||||
cs.flow.add(delta)
|
cs.flow.add(delta)
|
||||||
}
|
}
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
|
|
||||||
cc.initialWindowSize = s.Val
|
cc.initialWindowSize = s.Val
|
||||||
case SettingHeaderTableSize:
|
case SettingHeaderTableSize:
|
||||||
@ -3076,7 +3005,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
|
|||||||
|
|
||||||
return ConnectionError(ErrCodeFlowControl)
|
return ConnectionError(ErrCodeFlowControl)
|
||||||
}
|
}
|
||||||
cc.condBroadcast()
|
cc.cond.Broadcast()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3120,7 +3049,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
var pingError error
|
var pingError error
|
||||||
errc := make(chan struct{})
|
errc := make(chan struct{})
|
||||||
cc.goRun(func() {
|
go func() {
|
||||||
|
cc.t.markNewGoroutine()
|
||||||
cc.wmu.Lock()
|
cc.wmu.Lock()
|
||||||
defer cc.wmu.Unlock()
|
defer cc.wmu.Unlock()
|
||||||
if pingError = cc.fr.WritePing(false, p); pingError != nil {
|
if pingError = cc.fr.WritePing(false, p); pingError != nil {
|
||||||
@ -3131,20 +3061,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
|
|||||||
close(errc)
|
close(errc)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
})
|
}()
|
||||||
if cc.syncHooks != nil {
|
|
||||||
cc.syncHooks.blockUntil(func() bool {
|
|
||||||
select {
|
|
||||||
case <-c:
|
|
||||||
case <-errc:
|
|
||||||
case <-ctx.Done():
|
|
||||||
case <-cc.readerDone:
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
select {
|
select {
|
||||||
case <-c:
|
case <-c:
|
||||||
return nil
|
return nil
|
||||||
|
4
api/vendor/golang.org/x/net/http2/writesched_priority.go
generated
vendored
4
api/vendor/golang.org/x/net/http2/writesched_priority.go
generated
vendored
@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
|
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
|
||||||
for k := n.kids; k != nil; k = k.next {
|
for n.kids != nil {
|
||||||
k.setParent(n.parent)
|
n.kids.setParent(n.parent)
|
||||||
}
|
}
|
||||||
n.setParent(nil)
|
n.setParent(nil)
|
||||||
delete(ws.nodes, n.id)
|
delete(ws.nodes, n.id)
|
||||||
|
2
api/vendor/k8s.io/api/core/v1/doc.go
generated
vendored
2
api/vendor/k8s.io/api/core/v1/doc.go
generated
vendored
@ -17,6 +17,8 @@ limitations under the License.
|
|||||||
// +k8s:openapi-gen=true
|
// +k8s:openapi-gen=true
|
||||||
// +k8s:deepcopy-gen=package
|
// +k8s:deepcopy-gen=package
|
||||||
// +k8s:protobuf-gen=package
|
// +k8s:protobuf-gen=package
|
||||||
|
// +k8s:prerelease-lifecycle-gen=true
|
||||||
|
// +groupName=
|
||||||
|
|
||||||
// Package v1 is the v1 version of the core API.
|
// Package v1 is the v1 version of the core API.
|
||||||
package v1 // import "k8s.io/api/core/v1"
|
package v1 // import "k8s.io/api/core/v1"
|
||||||
|
4168
api/vendor/k8s.io/api/core/v1/generated.pb.go
generated
vendored
4168
api/vendor/k8s.io/api/core/v1/generated.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
452
api/vendor/k8s.io/api/core/v1/generated.proto
generated
vendored
452
api/vendor/k8s.io/api/core/v1/generated.proto
generated
vendored
File diff suppressed because it is too large
Load Diff
353
api/vendor/k8s.io/api/core/v1/types.go
generated
vendored
353
api/vendor/k8s.io/api/core/v1/types.go
generated
vendored
@ -181,6 +181,23 @@ type VolumeSource struct {
|
|||||||
//
|
//
|
||||||
// +optional
|
// +optional
|
||||||
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
|
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
|
||||||
|
// image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
|
||||||
|
// The volume is resolved at pod startup depending on which PullPolicy value is provided:
|
||||||
|
//
|
||||||
|
// - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
|
||||||
|
// - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
|
||||||
|
// - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
|
||||||
|
//
|
||||||
|
// The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
|
||||||
|
// A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
|
||||||
|
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||||
|
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||||
|
// The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||||
|
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||||
|
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||||
|
// +featureGate=ImageVolume
|
||||||
|
// +optional
|
||||||
|
Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
|
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
|
||||||
@ -295,6 +312,7 @@ const (
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
|
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
|
||||||
// It is analogous to a node.
|
// It is analogous to a node.
|
||||||
@ -371,7 +389,7 @@ type PersistentVolumeSpec struct {
|
|||||||
// after a volume has been updated successfully to a new class.
|
// after a volume has been updated successfully to a new class.
|
||||||
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
|
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
|
||||||
// PersistentVolumeClaims during the binding process.
|
// PersistentVolumeClaims during the binding process.
|
||||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
|
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
|
||||||
@ -425,13 +443,12 @@ type PersistentVolumeStatus struct {
|
|||||||
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
|
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
|
||||||
// lastPhaseTransitionTime is the time the phase transitioned from one to another
|
// lastPhaseTransitionTime is the time the phase transitioned from one to another
|
||||||
// and automatically resets to current time everytime a volume phase transitions.
|
// and automatically resets to current time everytime a volume phase transitions.
|
||||||
// This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).
|
|
||||||
// +featureGate=PersistentVolumeLastPhaseTransitionTime
|
|
||||||
// +optional
|
// +optional
|
||||||
LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
|
LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolumeList is a list of PersistentVolume items.
|
// PersistentVolumeList is a list of PersistentVolume items.
|
||||||
type PersistentVolumeList struct {
|
type PersistentVolumeList struct {
|
||||||
@ -447,6 +464,7 @@ type PersistentVolumeList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolumeClaim is a user's request for and claim to a persistent volume
|
// PersistentVolumeClaim is a user's request for and claim to a persistent volume
|
||||||
type PersistentVolumeClaim struct {
|
type PersistentVolumeClaim struct {
|
||||||
@ -469,6 +487,7 @@ type PersistentVolumeClaim struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
|
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
|
||||||
type PersistentVolumeClaimList struct {
|
type PersistentVolumeClaimList struct {
|
||||||
@ -557,7 +576,7 @@ type PersistentVolumeClaimSpec struct {
|
|||||||
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||||
// exists.
|
// exists.
|
||||||
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||||
// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
|
// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
|
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
|
||||||
@ -581,15 +600,29 @@ type TypedObjectReference struct {
|
|||||||
Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
|
Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
|
// PersistentVolumeClaimConditionType defines the condition of PV claim.
|
||||||
|
// Valid values are:
|
||||||
|
// - "Resizing", "FileSystemResizePending"
|
||||||
|
//
|
||||||
|
// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected:
|
||||||
|
// - "ControllerResizeError", "NodeResizeError"
|
||||||
|
//
|
||||||
|
// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected:
|
||||||
|
// - "ModifyVolumeError", "ModifyingVolume"
|
||||||
type PersistentVolumeClaimConditionType string
|
type PersistentVolumeClaimConditionType string
|
||||||
|
|
||||||
|
// These are valid conditions of PVC
|
||||||
const (
|
const (
|
||||||
// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
|
// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
|
||||||
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
|
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
|
||||||
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
|
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
|
||||||
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
|
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
|
||||||
|
|
||||||
|
// PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller
|
||||||
|
PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError"
|
||||||
|
// PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node.
|
||||||
|
PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError"
|
||||||
|
|
||||||
// Applying the target VolumeAttributesClass encountered an error
|
// Applying the target VolumeAttributesClass encountered an error
|
||||||
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
|
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
|
||||||
// Volume is being modified
|
// Volume is being modified
|
||||||
@ -606,18 +639,19 @@ const (
|
|||||||
// State set when resize controller starts resizing the volume in control-plane.
|
// State set when resize controller starts resizing the volume in control-plane.
|
||||||
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
|
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
|
||||||
|
|
||||||
// State set when resize has failed in resize controller with a terminal error.
|
// State set when resize has failed in resize controller with a terminal unrecoverable error.
|
||||||
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
|
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
|
||||||
// unmodified, so as resize controller can resume the volume expansion.
|
// unmodified, so as resize controller can resume the volume expansion.
|
||||||
PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"
|
PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible"
|
||||||
|
|
||||||
// State set when resize controller has finished resizing the volume but further resizing of volume
|
// State set when resize controller has finished resizing the volume but further resizing of volume
|
||||||
// is needed on the node.
|
// is needed on the node.
|
||||||
PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
|
PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
|
||||||
// State set when kubelet starts resizing the volume.
|
// State set when kubelet starts resizing the volume.
|
||||||
PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
|
PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
|
||||||
// State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed
|
// State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors
|
||||||
PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
|
// shouldn't set this status
|
||||||
|
PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible"
|
||||||
)
|
)
|
||||||
|
|
||||||
// +enum
|
// +enum
|
||||||
@ -763,13 +797,13 @@ type PersistentVolumeClaimStatus struct {
|
|||||||
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
|
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
|
||||||
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
||||||
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
||||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
|
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
|
||||||
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
||||||
// When this is unset, there is no ModifyVolume operation being attempted.
|
// When this is unset, there is no ModifyVolume operation being attempted.
|
||||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
|
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
|
||||||
@ -943,16 +977,19 @@ type RBDVolumeSource struct {
|
|||||||
// Default is rbd.
|
// Default is rbd.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="rbd"
|
||||||
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
||||||
// user is the rados user name.
|
// user is the rados user name.
|
||||||
// Default is admin.
|
// Default is admin.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="admin"
|
||||||
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
||||||
// keyring is the path to key ring for RBDUser.
|
// keyring is the path to key ring for RBDUser.
|
||||||
// Default is /etc/ceph/keyring.
|
// Default is /etc/ceph/keyring.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="/etc/ceph/keyring"
|
||||||
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
||||||
// secretRef is name of the authentication secret for RBDUser. If provided
|
// secretRef is name of the authentication secret for RBDUser. If provided
|
||||||
// overrides keyring.
|
// overrides keyring.
|
||||||
@ -988,16 +1025,19 @@ type RBDPersistentVolumeSource struct {
|
|||||||
// Default is rbd.
|
// Default is rbd.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="rbd"
|
||||||
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
||||||
// user is the rados user name.
|
// user is the rados user name.
|
||||||
// Default is admin.
|
// Default is admin.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="admin"
|
||||||
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
||||||
// keyring is the path to key ring for RBDUser.
|
// keyring is the path to key ring for RBDUser.
|
||||||
// Default is /etc/ceph/keyring.
|
// Default is /etc/ceph/keyring.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="/etc/ceph/keyring"
|
||||||
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
||||||
// secretRef is name of the authentication secret for RBDUser. If provided
|
// secretRef is name of the authentication secret for RBDUser. If provided
|
||||||
// overrides keyring.
|
// overrides keyring.
|
||||||
@ -1426,6 +1466,7 @@ type ISCSIVolumeSource struct {
|
|||||||
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
||||||
// Defaults to 'default' (tcp).
|
// Defaults to 'default' (tcp).
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="default"
|
||||||
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
||||||
// fsType is the filesystem type of the volume that you want to mount.
|
// fsType is the filesystem type of the volume that you want to mount.
|
||||||
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
||||||
@ -1473,6 +1514,7 @@ type ISCSIPersistentVolumeSource struct {
|
|||||||
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
||||||
// Defaults to 'default' (tcp).
|
// Defaults to 'default' (tcp).
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="default"
|
||||||
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
||||||
// fsType is the filesystem type of the volume that you want to mount.
|
// fsType is the filesystem type of the volume that you want to mount.
|
||||||
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
||||||
@ -1613,17 +1655,21 @@ type AzureDiskVolumeSource struct {
|
|||||||
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
|
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
|
||||||
// cachingMode is the Host Caching mode: None, Read Only, Read Write.
|
// cachingMode is the Host Caching mode: None, Read Only, Read Write.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default=ref(AzureDataDiskCachingReadWrite)
|
||||||
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
|
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
|
||||||
// fsType is Filesystem type to mount.
|
// fsType is Filesystem type to mount.
|
||||||
// Must be a filesystem type supported by the host operating system.
|
// Must be a filesystem type supported by the host operating system.
|
||||||
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
|
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="ext4"
|
||||||
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
|
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
|
||||||
// readOnly Defaults to false (read/write). ReadOnly here will force
|
// readOnly Defaults to false (read/write). ReadOnly here will force
|
||||||
// the ReadOnly setting in VolumeMounts.
|
// the ReadOnly setting in VolumeMounts.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default=false
|
||||||
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
|
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
|
||||||
// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
|
// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
|
||||||
|
// +default=ref(AzureSharedBlobDisk)
|
||||||
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
|
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1662,6 +1708,7 @@ type ScaleIOVolumeSource struct {
|
|||||||
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
||||||
// Default is ThinProvisioned.
|
// Default is ThinProvisioned.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="ThinProvisioned"
|
||||||
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
||||||
// volumeName is the name of a volume already created in the ScaleIO system
|
// volumeName is the name of a volume already created in the ScaleIO system
|
||||||
// that is associated with this volume source.
|
// that is associated with this volume source.
|
||||||
@ -1671,6 +1718,7 @@ type ScaleIOVolumeSource struct {
|
|||||||
// Ex. "ext4", "xfs", "ntfs".
|
// Ex. "ext4", "xfs", "ntfs".
|
||||||
// Default is "xfs".
|
// Default is "xfs".
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="xfs"
|
||||||
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
||||||
// readOnly Defaults to false (read/write). ReadOnly here will force
|
// readOnly Defaults to false (read/write). ReadOnly here will force
|
||||||
// the ReadOnly setting in VolumeMounts.
|
// the ReadOnly setting in VolumeMounts.
|
||||||
@ -1699,6 +1747,7 @@ type ScaleIOPersistentVolumeSource struct {
|
|||||||
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
||||||
// Default is ThinProvisioned.
|
// Default is ThinProvisioned.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="ThinProvisioned"
|
||||||
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
||||||
// volumeName is the name of a volume already created in the ScaleIO system
|
// volumeName is the name of a volume already created in the ScaleIO system
|
||||||
// that is associated with this volume source.
|
// that is associated with this volume source.
|
||||||
@ -1708,6 +1757,7 @@ type ScaleIOPersistentVolumeSource struct {
|
|||||||
// Ex. "ext4", "xfs", "ntfs".
|
// Ex. "ext4", "xfs", "ntfs".
|
||||||
// Default is "xfs"
|
// Default is "xfs"
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="xfs"
|
||||||
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
||||||
// readOnly defaults to false (read/write). ReadOnly here will force
|
// readOnly defaults to false (read/write). ReadOnly here will force
|
||||||
// the ReadOnly setting in VolumeMounts.
|
// the ReadOnly setting in VolumeMounts.
|
||||||
@ -1891,7 +1941,8 @@ type ClusterTrustBundleProjection struct {
|
|||||||
|
|
||||||
// Represents a projected volume source
|
// Represents a projected volume source
|
||||||
type ProjectedVolumeSource struct {
|
type ProjectedVolumeSource struct {
|
||||||
// sources is the list of volume projections
|
// sources is the list of volume projections. Each entry in this list
|
||||||
|
// handles one source.
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
|
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
|
||||||
@ -1905,10 +1956,9 @@ type ProjectedVolumeSource struct {
|
|||||||
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
|
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Projection that may be projected along with other supported volume types
|
// Projection that may be projected along with other supported volume types.
|
||||||
|
// Exactly one of these fields must be set.
|
||||||
type VolumeProjection struct {
|
type VolumeProjection struct {
|
||||||
// all types below are the supported types for projection into the same volume
|
|
||||||
|
|
||||||
// secret information about the secret data to project
|
// secret information about the secret data to project
|
||||||
// +optional
|
// +optional
|
||||||
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
|
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
|
||||||
@ -2631,6 +2681,13 @@ type ResourceClaim struct {
|
|||||||
// the Pod where this field is used. It makes that resource available
|
// the Pod where this field is used. It makes that resource available
|
||||||
// inside a container.
|
// inside a container.
|
||||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||||
|
|
||||||
|
// Request is the name chosen for a request in the referenced claim.
|
||||||
|
// If empty, everything from the claim is made available, otherwise
|
||||||
|
// only the result of this request.
|
||||||
|
//
|
||||||
|
// +optional
|
||||||
|
Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"`
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -3030,6 +3087,93 @@ type ContainerStatus struct {
|
|||||||
// +listMapKey=mountPath
|
// +listMapKey=mountPath
|
||||||
// +featureGate=RecursiveReadOnlyMounts
|
// +featureGate=RecursiveReadOnlyMounts
|
||||||
VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
|
VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
|
||||||
|
// User represents user identity information initially attached to the first process of the container
|
||||||
|
// +featureGate=SupplementalGroupsPolicy
|
||||||
|
// +optional
|
||||||
|
User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"`
|
||||||
|
// AllocatedResourcesStatus represents the status of various resources
|
||||||
|
// allocated for this Pod.
|
||||||
|
// +featureGate=ResourceHealthStatus
|
||||||
|
// +optional
|
||||||
|
// +patchMergeKey=name
|
||||||
|
// +patchStrategy=merge
|
||||||
|
// +listType=map
|
||||||
|
// +listMapKey=name
|
||||||
|
AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ResourceStatus struct {
|
||||||
|
// Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
|
||||||
|
// +required
|
||||||
|
Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||||
|
// List of unique Resources health. Each element in the list contains an unique resource ID and resource health.
|
||||||
|
// At a minimum, ResourceID must uniquely identify the Resource
|
||||||
|
// allocated to the Pod on the Node for the lifetime of a Pod.
|
||||||
|
// See ResourceID type for it's definition.
|
||||||
|
// +listType=map
|
||||||
|
// +listMapKey=resourceID
|
||||||
|
Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ResourceHealthStatus string
|
||||||
|
|
||||||
|
const (
|
||||||
|
ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy"
|
||||||
|
ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy"
|
||||||
|
ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResourceID is calculated based on the source of this resource health information.
|
||||||
|
// For DevicePlugin:
|
||||||
|
//
|
||||||
|
// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
|
||||||
|
//
|
||||||
|
// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
|
||||||
|
// For DRA:
|
||||||
|
//
|
||||||
|
// dra:<driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
|
||||||
|
type ResourceID string
|
||||||
|
|
||||||
|
// ResourceHealth represents the health of a resource. It has the latest device health information.
|
||||||
|
// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
|
||||||
|
type ResourceHealth struct {
|
||||||
|
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
|
||||||
|
ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"`
|
||||||
|
// Health of the resource.
|
||||||
|
// can be one of:
|
||||||
|
// - Healthy: operates as normal
|
||||||
|
// - Unhealthy: reported unhealthy. We consider this a temporary health issue
|
||||||
|
// since we do not have a mechanism today to distinguish
|
||||||
|
// temporary and permanent issues.
|
||||||
|
// - Unknown: The status cannot be determined.
|
||||||
|
// For example, Device Plugin got unregistered and hasn't been re-registered since.
|
||||||
|
//
|
||||||
|
// In future we may want to introduce the PermanentlyUnhealthy Status.
|
||||||
|
Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerUser represents user identity information
|
||||||
|
type ContainerUser struct {
|
||||||
|
// Linux holds user identity information initially attached to the first process of the containers in Linux.
|
||||||
|
// Note that the actual running identity can be changed if the process has enough privilege to do so.
|
||||||
|
// +optional
|
||||||
|
Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"`
|
||||||
|
|
||||||
|
// Windows holds user identity information initially attached to the first process of the containers in Windows
|
||||||
|
// This is just reserved for future use.
|
||||||
|
// Windows *WindowsContainerUser
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinuxContainerUser represents user identity information in Linux containers
|
||||||
|
type LinuxContainerUser struct {
|
||||||
|
// UID is the primary uid initially attached to the first process in the container
|
||||||
|
UID int64 `json:"uid" protobuf:"varint,1,name=uid"`
|
||||||
|
// GID is the primary gid initially attached to the first process in the container
|
||||||
|
GID int64 `json:"gid" protobuf:"varint,2,name=gid"`
|
||||||
|
// SupplementalGroups are the supplemental groups initially attached to the first process in the container
|
||||||
|
// +optional
|
||||||
|
// +listType=atomic
|
||||||
|
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PodPhase is a label for the condition of a pod at the current time.
|
// PodPhase is a label for the condition of a pod at the current time.
|
||||||
@ -3426,7 +3570,8 @@ type PodAffinityTerm struct {
|
|||||||
// pod labels will be ignored. The default value is empty.
|
// pod labels will be ignored. The default value is empty.
|
||||||
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||||
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||||
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||||
|
//
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
// +optional
|
// +optional
|
||||||
MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
|
MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
|
||||||
@ -3438,7 +3583,8 @@ type PodAffinityTerm struct {
|
|||||||
// pod labels will be ignored. The default value is empty.
|
// pod labels will be ignored. The default value is empty.
|
||||||
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||||
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||||
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||||
|
//
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
// +optional
|
// +optional
|
||||||
MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
|
MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
|
||||||
@ -3667,9 +3813,11 @@ type PodSpec struct {
|
|||||||
// +optional
|
// +optional
|
||||||
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
|
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
|
||||||
|
|
||||||
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
|
// NodeName indicates in which node this pod is scheduled.
|
||||||
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
|
// If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
|
||||||
// requirements.
|
// Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
|
||||||
|
// This field should not be used to express a desire for the pod to be scheduled on a specific node.
|
||||||
|
// https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
|
||||||
// +optional
|
// +optional
|
||||||
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
|
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
|
||||||
// Host networking requested for this pod. Use the host's network namespace.
|
// Host networking requested for this pod. Use the host's network namespace.
|
||||||
@ -3826,6 +3974,7 @@ type PodSpec struct {
|
|||||||
// - spec.securityContext.runAsUser
|
// - spec.securityContext.runAsUser
|
||||||
// - spec.securityContext.runAsGroup
|
// - spec.securityContext.runAsGroup
|
||||||
// - spec.securityContext.supplementalGroups
|
// - spec.securityContext.supplementalGroups
|
||||||
|
// - spec.securityContext.supplementalGroupsPolicy
|
||||||
// - spec.containers[*].securityContext.appArmorProfile
|
// - spec.containers[*].securityContext.appArmorProfile
|
||||||
// - spec.containers[*].securityContext.seLinuxOptions
|
// - spec.containers[*].securityContext.seLinuxOptions
|
||||||
// - spec.containers[*].securityContext.seccompProfile
|
// - spec.containers[*].securityContext.seccompProfile
|
||||||
@ -3883,7 +4032,10 @@ type PodSpec struct {
|
|||||||
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
|
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
|
// PodResourceClaim references exactly one ResourceClaim, either directly
|
||||||
|
// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
|
||||||
|
// for the pod.
|
||||||
|
//
|
||||||
// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
|
// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
|
||||||
// Containers that need access to the ResourceClaim reference it with this name.
|
// Containers that need access to the ResourceClaim reference it with this name.
|
||||||
type PodResourceClaim struct {
|
type PodResourceClaim struct {
|
||||||
@ -3891,18 +4043,17 @@ type PodResourceClaim struct {
|
|||||||
// This must be a DNS_LABEL.
|
// This must be a DNS_LABEL.
|
||||||
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
||||||
|
|
||||||
// Source describes where to find the ResourceClaim.
|
// Source is tombstoned since Kubernetes 1.31 where it got replaced with
|
||||||
Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
|
// the inlined fields below.
|
||||||
}
|
//
|
||||||
|
// Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
|
||||||
|
|
||||||
// ClaimSource describes a reference to a ResourceClaim.
|
|
||||||
//
|
|
||||||
// Exactly one of these fields should be set. Consumers of this type must
|
|
||||||
// treat an empty object as if it has an unknown value.
|
|
||||||
type ClaimSource struct {
|
|
||||||
// ResourceClaimName is the name of a ResourceClaim object in the same
|
// ResourceClaimName is the name of a ResourceClaim object in the same
|
||||||
// namespace as this pod.
|
// namespace as this pod.
|
||||||
ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"`
|
//
|
||||||
|
// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
|
||||||
|
// be set.
|
||||||
|
ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"`
|
||||||
|
|
||||||
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
|
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
|
||||||
// object in the same namespace as this pod.
|
// object in the same namespace as this pod.
|
||||||
@ -3916,7 +4067,10 @@ type ClaimSource struct {
|
|||||||
// This field is immutable and no changes will be made to the
|
// This field is immutable and no changes will be made to the
|
||||||
// corresponding ResourceClaim by the control plane after creating the
|
// corresponding ResourceClaim by the control plane after creating the
|
||||||
// ResourceClaim.
|
// ResourceClaim.
|
||||||
ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"`
|
//
|
||||||
|
// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
|
||||||
|
// be set.
|
||||||
|
ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
|
// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
|
||||||
@ -3929,7 +4083,7 @@ type PodResourceClaimStatus struct {
|
|||||||
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
||||||
|
|
||||||
// ResourceClaimName is the name of the ResourceClaim that was
|
// ResourceClaimName is the name of the ResourceClaim that was
|
||||||
// generated for the Pod in the namespace of the Pod. It this is
|
// generated for the Pod in the namespace of the Pod. If this is
|
||||||
// unset, then generating a ResourceClaim was not necessary. The
|
// unset, then generating a ResourceClaim was not necessary. The
|
||||||
// pod.spec.resourceClaims entry can be ignored in this case.
|
// pod.spec.resourceClaims entry can be ignored in this case.
|
||||||
//
|
//
|
||||||
@ -4137,6 +4291,23 @@ const (
|
|||||||
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
|
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// SupplementalGroupsPolicy defines how supplemental groups
|
||||||
|
// of the first container processes are calculated.
|
||||||
|
// +enum
|
||||||
|
type SupplementalGroupsPolicy string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// SupplementalGroupsPolicyMerge means that the container's provided
|
||||||
|
// SupplementalGroups and FsGroup (specified in SecurityContext) will be
|
||||||
|
// merged with the primary user's groups as defined in the container image
|
||||||
|
// (in /etc/group).
|
||||||
|
SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge"
|
||||||
|
// SupplementalGroupsPolicyStrict means that the container's provided
|
||||||
|
// SupplementalGroups and FsGroup (specified in SecurityContext) will be
|
||||||
|
// used instead of any groups defined in the container image.
|
||||||
|
SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
|
||||||
|
)
|
||||||
|
|
||||||
// PodSecurityContext holds pod-level security attributes and common container settings.
|
// PodSecurityContext holds pod-level security attributes and common container settings.
|
||||||
// Some fields are also present in container.securityContext. Field values of
|
// Some fields are also present in container.securityContext. Field values of
|
||||||
// container.securityContext take precedence over field values of PodSecurityContext.
|
// container.securityContext take precedence over field values of PodSecurityContext.
|
||||||
@ -4179,16 +4350,27 @@ type PodSecurityContext struct {
|
|||||||
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||||
// +optional
|
// +optional
|
||||||
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
|
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
|
||||||
// A list of groups applied to the first process run in each container, in addition
|
// A list of groups applied to the first process run in each container, in
|
||||||
// to the container's primary GID, the fsGroup (if specified), and group memberships
|
// addition to the container's primary GID and fsGroup (if specified). If
|
||||||
// defined in the container image for the uid of the container process. If unspecified,
|
// the SupplementalGroupsPolicy feature is enabled, the
|
||||||
// no additional groups are added to any container. Note that group memberships
|
// supplementalGroupsPolicy field determines whether these are in addition
|
||||||
// defined in the container image for the uid of the container process are still effective,
|
// to or instead of any group memberships defined in the container image.
|
||||||
// even if they are not included in this list.
|
// If unspecified, no additional groups are added, though group memberships
|
||||||
|
// defined in the container image may still be used, depending on the
|
||||||
|
// supplementalGroupsPolicy field.
|
||||||
// Note that this field cannot be set when spec.os.name is windows.
|
// Note that this field cannot be set when spec.os.name is windows.
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
|
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
|
||||||
|
// Defines how supplemental groups of the first container processes are calculated.
|
||||||
|
// Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
|
||||||
|
// (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
|
||||||
|
// and the container runtime must implement support for this feature.
|
||||||
|
// Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
// TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
|
||||||
|
// +featureGate=SupplementalGroupsPolicy
|
||||||
|
// +optional
|
||||||
|
SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"`
|
||||||
// A special supplemental group that applies to all containers in a pod.
|
// A special supplemental group that applies to all containers in a pod.
|
||||||
// Some volume types allow the Kubelet to change the ownership of that volume
|
// Some volume types allow the Kubelet to change the ownership of that volume
|
||||||
// to be owned by the pod:
|
// to be owned by the pod:
|
||||||
@ -4340,13 +4522,15 @@ type PodDNSConfigOption struct {
|
|||||||
// PodIP represents a single IP address allocated to the pod.
|
// PodIP represents a single IP address allocated to the pod.
|
||||||
type PodIP struct {
|
type PodIP struct {
|
||||||
// IP is the IP address assigned to the pod
|
// IP is the IP address assigned to the pod
|
||||||
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
|
// +required
|
||||||
|
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// HostIP represents a single IP address allocated to the host.
|
// HostIP represents a single IP address allocated to the host.
|
||||||
type HostIP struct {
|
type HostIP struct {
|
||||||
// IP is the IP address assigned to the host
|
// IP is the IP address assigned to the host
|
||||||
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
|
// +required
|
||||||
|
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
|
// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
|
||||||
@ -4663,6 +4847,7 @@ type PodStatus struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
|
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
|
||||||
type PodStatusResult struct {
|
type PodStatusResult struct {
|
||||||
@ -4683,6 +4868,7 @@ type PodStatusResult struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
|
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Pod is a collection of containers that can run on a host. This resource is created
|
// Pod is a collection of containers that can run on a host. This resource is created
|
||||||
// by clients and scheduled onto hosts.
|
// by clients and scheduled onto hosts.
|
||||||
@ -4708,6 +4894,7 @@ type Pod struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodList is a list of Pods.
|
// PodList is a list of Pods.
|
||||||
type PodList struct {
|
type PodList struct {
|
||||||
@ -4737,6 +4924,7 @@ type PodTemplateSpec struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodTemplate describes a template for creating copies of a predefined pod.
|
// PodTemplate describes a template for creating copies of a predefined pod.
|
||||||
type PodTemplate struct {
|
type PodTemplate struct {
|
||||||
@ -4753,6 +4941,7 @@ type PodTemplate struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodTemplateList is a list of PodTemplates.
|
// PodTemplateList is a list of PodTemplates.
|
||||||
type PodTemplateList struct {
|
type PodTemplateList struct {
|
||||||
@ -4867,6 +5056,7 @@ type ReplicationControllerCondition struct {
|
|||||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ReplicationController represents the configuration of a replication controller.
|
// ReplicationController represents the configuration of a replication controller.
|
||||||
type ReplicationController struct {
|
type ReplicationController struct {
|
||||||
@ -4893,6 +5083,7 @@ type ReplicationController struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ReplicationControllerList is a collection of replication controllers.
|
// ReplicationControllerList is a collection of replication controllers.
|
||||||
type ReplicationControllerList struct {
|
type ReplicationControllerList struct {
|
||||||
@ -5437,6 +5628,7 @@ type ServicePort struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:skipVerbs=deleteCollection
|
// +genclient:skipVerbs=deleteCollection
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Service is a named abstraction of software service (for example, mysql) consisting of local port
|
// Service is a named abstraction of software service (for example, mysql) consisting of local port
|
||||||
// (for example 3306) that the proxy listens on, and the selector that determines which pods
|
// (for example 3306) that the proxy listens on, and the selector that determines which pods
|
||||||
@ -5468,6 +5660,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ServiceList holds a list of services.
|
// ServiceList holds a list of services.
|
||||||
type ServiceList struct {
|
type ServiceList struct {
|
||||||
@ -5484,6 +5677,7 @@ type ServiceList struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
|
// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ServiceAccount binds together:
|
// ServiceAccount binds together:
|
||||||
// * a name, understood by users, and perhaps by peripheral systems, for an identity
|
// * a name, understood by users, and perhaps by peripheral systems, for an identity
|
||||||
@ -5523,6 +5717,7 @@ type ServiceAccount struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ServiceAccountList is a list of ServiceAccount objects
|
// ServiceAccountList is a list of ServiceAccount objects
|
||||||
type ServiceAccountList struct {
|
type ServiceAccountList struct {
|
||||||
@ -5539,6 +5734,7 @@ type ServiceAccountList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Endpoints is a collection of endpoints that implement the actual service. Example:
|
// Endpoints is a collection of endpoints that implement the actual service. Example:
|
||||||
//
|
//
|
||||||
@ -5660,6 +5856,7 @@ type EndpointPort struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// EndpointsList is a list of endpoints.
|
// EndpointsList is a list of endpoints.
|
||||||
type EndpointsList struct {
|
type EndpointsList struct {
|
||||||
@ -5772,13 +5969,16 @@ type NodeDaemonEndpoints struct {
|
|||||||
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
|
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeRuntimeHandlerFeatures is a set of runtime features.
|
// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
|
||||||
type NodeRuntimeHandlerFeatures struct {
|
type NodeRuntimeHandlerFeatures struct {
|
||||||
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
|
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
|
||||||
// +featureGate=RecursiveReadOnlyMounts
|
// +featureGate=RecursiveReadOnlyMounts
|
||||||
// +optional
|
// +optional
|
||||||
RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
|
RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
|
||||||
// Reserved: UserNamespaces *bool (varint 2, for consistency with CRI API)
|
// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
|
||||||
|
// +featureGate=UserNamespacesSupport
|
||||||
|
// +optional
|
||||||
|
UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeRuntimeHandler is a set of runtime handler information.
|
// NodeRuntimeHandler is a set of runtime handler information.
|
||||||
@ -5792,6 +5992,15 @@ type NodeRuntimeHandler struct {
|
|||||||
Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"`
|
Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NodeFeatures describes the set of features implemented by the CRI implementation.
|
||||||
|
// The features contained in the NodeFeatures should depend only on the cri implementation
|
||||||
|
// independent of runtime handlers.
|
||||||
|
type NodeFeatures struct {
|
||||||
|
// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
|
||||||
|
// +optional
|
||||||
|
SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"`
|
||||||
|
}
|
||||||
|
|
||||||
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
|
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
|
||||||
type NodeSystemInfo struct {
|
type NodeSystemInfo struct {
|
||||||
// MachineID reported by the node. For unique machine identification
|
// MachineID reported by the node. For unique machine identification
|
||||||
@ -5812,7 +6021,7 @@ type NodeSystemInfo struct {
|
|||||||
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
|
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
|
||||||
// Kubelet Version reported by the node.
|
// Kubelet Version reported by the node.
|
||||||
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
|
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
|
||||||
// KubeProxy Version reported by the node.
|
// Deprecated: KubeProxy Version reported by the node.
|
||||||
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
|
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
|
||||||
// The Operating System reported by the node
|
// The Operating System reported by the node
|
||||||
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
|
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
|
||||||
@ -5870,7 +6079,7 @@ type NodeConfigStatus struct {
|
|||||||
// NodeStatus is information about the current status of a node.
|
// NodeStatus is information about the current status of a node.
|
||||||
type NodeStatus struct {
|
type NodeStatus struct {
|
||||||
// Capacity represents the total resources of a node.
|
// Capacity represents the total resources of a node.
|
||||||
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
|
// More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
|
||||||
// +optional
|
// +optional
|
||||||
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
|
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
|
||||||
// Allocatable represents the resources of a node that are available for scheduling.
|
// Allocatable represents the resources of a node that are available for scheduling.
|
||||||
@ -5930,9 +6139,14 @@ type NodeStatus struct {
|
|||||||
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
|
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
|
||||||
// The available runtime handlers.
|
// The available runtime handlers.
|
||||||
// +featureGate=RecursiveReadOnlyMounts
|
// +featureGate=RecursiveReadOnlyMounts
|
||||||
|
// +featureGate=UserNamespacesSupport
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"`
|
RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"`
|
||||||
|
// Features describes the set of features implemented by the CRI implementation.
|
||||||
|
// +featureGate=SupplementalGroupsPolicy
|
||||||
|
// +optional
|
||||||
|
Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type UniqueVolumeName string
|
type UniqueVolumeName string
|
||||||
@ -6128,6 +6342,7 @@ type ResourceList map[ResourceName]resource.Quantity
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Node is a worker node in Kubernetes.
|
// Node is a worker node in Kubernetes.
|
||||||
// Each node will have a unique identifier in the cache (i.e. in etcd).
|
// Each node will have a unique identifier in the cache (i.e. in etcd).
|
||||||
@ -6152,6 +6367,7 @@ type Node struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// NodeList is the whole list of all Nodes which have been registered with master.
|
// NodeList is the whole list of all Nodes which have been registered with master.
|
||||||
type NodeList struct {
|
type NodeList struct {
|
||||||
@ -6250,6 +6466,7 @@ type NamespaceCondition struct {
|
|||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +genclient:skipVerbs=deleteCollection
|
// +genclient:skipVerbs=deleteCollection
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Namespace provides a scope for Names.
|
// Namespace provides a scope for Names.
|
||||||
// Use of multiple namespaces is optional.
|
// Use of multiple namespaces is optional.
|
||||||
@ -6272,6 +6489,7 @@ type Namespace struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// NamespaceList is a list of Namespaces.
|
// NamespaceList is a list of Namespaces.
|
||||||
type NamespaceList struct {
|
type NamespaceList struct {
|
||||||
@ -6287,6 +6505,7 @@ type NamespaceList struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
|
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
|
||||||
// Deprecated in 1.7, please use the bindings subresource of pods instead.
|
// Deprecated in 1.7, please use the bindings subresource of pods instead.
|
||||||
@ -6311,6 +6530,7 @@ type Preconditions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodLogOptions is the query options for a Pod's logs REST call.
|
// PodLogOptions is the query options for a Pod's logs REST call.
|
||||||
type PodLogOptions struct {
|
type PodLogOptions struct {
|
||||||
@ -6363,6 +6583,7 @@ type PodLogOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||||
|
|
||||||
// PodAttachOptions is the query options to a Pod's remote attach call.
|
// PodAttachOptions is the query options to a Pod's remote attach call.
|
||||||
// ---
|
// ---
|
||||||
@ -6401,6 +6622,7 @@ type PodAttachOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodExecOptions is the query options to a Pod's remote exec call.
|
// PodExecOptions is the query options to a Pod's remote exec call.
|
||||||
// ---
|
// ---
|
||||||
@ -6439,6 +6661,7 @@ type PodExecOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||||
|
|
||||||
// PodPortForwardOptions is the query options to a Pod's port forward call
|
// PodPortForwardOptions is the query options to a Pod's port forward call
|
||||||
// when using WebSockets.
|
// when using WebSockets.
|
||||||
@ -6458,6 +6681,7 @@ type PodPortForwardOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodProxyOptions is the query options to a Pod's proxy call.
|
// PodProxyOptions is the query options to a Pod's proxy call.
|
||||||
type PodProxyOptions struct {
|
type PodProxyOptions struct {
|
||||||
@ -6470,6 +6694,7 @@ type PodProxyOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// NodeProxyOptions is the query options to a Node's proxy call.
|
// NodeProxyOptions is the query options to a Node's proxy call.
|
||||||
type NodeProxyOptions struct {
|
type NodeProxyOptions struct {
|
||||||
@ -6482,6 +6707,7 @@ type NodeProxyOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// ServiceProxyOptions is the query options to a Service's proxy call.
|
// ServiceProxyOptions is the query options to a Service's proxy call.
|
||||||
type ServiceProxyOptions struct {
|
type ServiceProxyOptions struct {
|
||||||
@ -6584,6 +6810,7 @@ type TypedLocalObjectReference struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// SerializedReference is a reference to serialized object.
|
// SerializedReference is a reference to serialized object.
|
||||||
type SerializedReference struct {
|
type SerializedReference struct {
|
||||||
@ -6613,6 +6840,7 @@ const (
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Event is a report of an event somewhere in the cluster. Events
|
// Event is a report of an event somewhere in the cluster. Events
|
||||||
// have a limited retention time and triggers and messages may evolve
|
// have a limited retention time and triggers and messages may evolve
|
||||||
@ -6697,6 +6925,7 @@ type EventSeries struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// EventList is a list of events.
|
// EventList is a list of events.
|
||||||
type EventList struct {
|
type EventList struct {
|
||||||
@ -6711,6 +6940,7 @@ type EventList struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// List holds a list of objects, which may not be known by the server.
|
// List holds a list of objects, which may not be known by the server.
|
||||||
type List metav1.List
|
type List metav1.List
|
||||||
@ -6758,6 +6988,7 @@ type LimitRangeSpec struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
|
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
|
||||||
type LimitRange struct {
|
type LimitRange struct {
|
||||||
@ -6774,6 +7005,7 @@ type LimitRange struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// LimitRangeList is a list of LimitRange items.
|
// LimitRangeList is a list of LimitRange items.
|
||||||
type LimitRangeList struct {
|
type LimitRangeList struct {
|
||||||
@ -6822,6 +7054,8 @@ const (
|
|||||||
ResourceLimitsMemory ResourceName = "limits.memory"
|
ResourceLimitsMemory ResourceName = "limits.memory"
|
||||||
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
||||||
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
||||||
|
// resource.k8s.io devices requested with a certain DeviceClass, number
|
||||||
|
ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
|
||||||
)
|
)
|
||||||
|
|
||||||
// The following identify resource prefix for Kubernetes object types
|
// The following identify resource prefix for Kubernetes object types
|
||||||
@ -6922,6 +7156,7 @@ type ResourceQuotaStatus struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ResourceQuota sets aggregate quota restrictions enforced per namespace
|
// ResourceQuota sets aggregate quota restrictions enforced per namespace
|
||||||
type ResourceQuota struct {
|
type ResourceQuota struct {
|
||||||
@ -6943,6 +7178,7 @@ type ResourceQuota struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ResourceQuotaList is a list of ResourceQuota items.
|
// ResourceQuotaList is a list of ResourceQuota items.
|
||||||
type ResourceQuotaList struct {
|
type ResourceQuotaList struct {
|
||||||
@ -6959,6 +7195,7 @@ type ResourceQuotaList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Secret holds secret data of a certain type. The total bytes of the values in
|
// Secret holds secret data of a certain type. The total bytes of the values in
|
||||||
// the Data field must be less than MaxSecretSize bytes.
|
// the Data field must be less than MaxSecretSize bytes.
|
||||||
@ -7085,6 +7322,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// SecretList is a list of Secret.
|
// SecretList is a list of Secret.
|
||||||
type SecretList struct {
|
type SecretList struct {
|
||||||
@ -7101,6 +7339,7 @@ type SecretList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// ConfigMap holds configuration data for pods to consume.
|
// ConfigMap holds configuration data for pods to consume.
|
||||||
type ConfigMap struct {
|
type ConfigMap struct {
|
||||||
@ -7137,6 +7376,7 @@ type ConfigMap struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// ConfigMapList is a resource containing a list of ConfigMap objects.
|
// ConfigMapList is a resource containing a list of ConfigMap objects.
|
||||||
type ConfigMapList struct {
|
type ConfigMapList struct {
|
||||||
@ -7179,6 +7419,7 @@ type ComponentCondition struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
|
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
|
||||||
// Deprecated: This API is deprecated in v1.19+
|
// Deprecated: This API is deprecated in v1.19+
|
||||||
@ -7199,6 +7440,7 @@ type ComponentStatus struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Status of all the conditions for the component as a list of ComponentStatus objects.
|
// Status of all the conditions for the component as a list of ComponentStatus objects.
|
||||||
// Deprecated: This API is deprecated in v1.19+
|
// Deprecated: This API is deprecated in v1.19+
|
||||||
@ -7332,7 +7574,7 @@ type SecurityContext struct {
|
|||||||
// +optional
|
// +optional
|
||||||
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
|
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
|
||||||
// procMount denotes the type of proc mount to use for the containers.
|
// procMount denotes the type of proc mount to use for the containers.
|
||||||
// The default is DefaultProcMount which uses the container runtime defaults for
|
// The default value is Default which uses the container runtime defaults for
|
||||||
// readonly paths and masked paths.
|
// readonly paths and masked paths.
|
||||||
// This requires the ProcMountType feature flag to be enabled.
|
// This requires the ProcMountType feature flag to be enabled.
|
||||||
// Note that this field cannot be set when spec.os.name is windows.
|
// Note that this field cannot be set when spec.os.name is windows.
|
||||||
@ -7410,6 +7652,7 @@ type WindowsSecurityContextOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// RangeAllocation is not a public type.
|
// RangeAllocation is not a public type.
|
||||||
type RangeAllocation struct {
|
type RangeAllocation struct {
|
||||||
@ -7519,3 +7762,23 @@ const (
|
|||||||
// the destination set to the node's IP and port or the pod's IP and port.
|
// the destination set to the node's IP and port or the pod's IP and port.
|
||||||
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
|
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ImageVolumeSource represents a image volume resource.
|
||||||
|
type ImageVolumeSource struct {
|
||||||
|
// Required: Image or artifact reference to be used.
|
||||||
|
// Behaves in the same way as pod.spec.containers[*].image.
|
||||||
|
// Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/containers/images
|
||||||
|
// This field is optional to allow higher level config management to default or override
|
||||||
|
// container images in workload controllers like Deployments and StatefulSets.
|
||||||
|
// +optional
|
||||||
|
Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
|
||||||
|
|
||||||
|
// Policy for pulling OCI objects. Possible values are:
|
||||||
|
// Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
|
||||||
|
// Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
|
||||||
|
// IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
|
||||||
|
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
|
||||||
|
// +optional
|
||||||
|
PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"`
|
||||||
|
}
|
||||||
|
114
api/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
114
api/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
@ -219,16 +219,6 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string {
|
|||||||
return map_CinderVolumeSource
|
return map_CinderVolumeSource
|
||||||
}
|
}
|
||||||
|
|
||||||
var map_ClaimSource = map[string]string{
|
|
||||||
"": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.",
|
|
||||||
"resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
|
|
||||||
"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ClaimSource) SwaggerDoc() map[string]string {
|
|
||||||
return map_ClaimSource
|
|
||||||
}
|
|
||||||
|
|
||||||
var map_ClientIPConfig = map[string]string{
|
var map_ClientIPConfig = map[string]string{
|
||||||
"": "ClientIPConfig represents the configurations of Client IP based session affinity.",
|
"": "ClientIPConfig represents the configurations of Client IP based session affinity.",
|
||||||
"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
|
"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
|
||||||
@ -482,12 +472,23 @@ var map_ContainerStatus = map[string]string{
|
|||||||
"allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
|
"allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
|
||||||
"resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
|
"resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
|
||||||
"volumeMounts": "Status of volume mounts.",
|
"volumeMounts": "Status of volume mounts.",
|
||||||
|
"user": "User represents user identity information initially attached to the first process of the container",
|
||||||
|
"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ContainerStatus) SwaggerDoc() map[string]string {
|
func (ContainerStatus) SwaggerDoc() map[string]string {
|
||||||
return map_ContainerStatus
|
return map_ContainerStatus
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ContainerUser = map[string]string{
|
||||||
|
"": "ContainerUser represents user identity information",
|
||||||
|
"linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ContainerUser) SwaggerDoc() map[string]string {
|
||||||
|
return map_ContainerUser
|
||||||
|
}
|
||||||
|
|
||||||
var map_DaemonEndpoint = map[string]string{
|
var map_DaemonEndpoint = map[string]string{
|
||||||
"": "DaemonEndpoint contains information about a single Daemon endpoint.",
|
"": "DaemonEndpoint contains information about a single Daemon endpoint.",
|
||||||
"Port": "Port number of the given endpoint.",
|
"Port": "Port number of the given endpoint.",
|
||||||
@ -933,6 +934,16 @@ func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
|
|||||||
return map_ISCSIVolumeSource
|
return map_ISCSIVolumeSource
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ImageVolumeSource = map[string]string{
|
||||||
|
"": "ImageVolumeSource represents a image volume resource.",
|
||||||
|
"reference": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
|
||||||
|
"pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ImageVolumeSource) SwaggerDoc() map[string]string {
|
||||||
|
return map_ImageVolumeSource
|
||||||
|
}
|
||||||
|
|
||||||
var map_KeyToPath = map[string]string{
|
var map_KeyToPath = map[string]string{
|
||||||
"": "Maps a string key to a path within a volume.",
|
"": "Maps a string key to a path within a volume.",
|
||||||
"key": "key is the key to project.",
|
"key": "key is the key to project.",
|
||||||
@ -1009,6 +1020,17 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string {
|
|||||||
return map_LimitRangeSpec
|
return map_LimitRangeSpec
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_LinuxContainerUser = map[string]string{
|
||||||
|
"": "LinuxContainerUser represents user identity information in Linux containers",
|
||||||
|
"uid": "UID is the primary uid initially attached to the first process in the container",
|
||||||
|
"gid": "GID is the primary gid initially attached to the first process in the container",
|
||||||
|
"supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (LinuxContainerUser) SwaggerDoc() map[string]string {
|
||||||
|
return map_LinuxContainerUser
|
||||||
|
}
|
||||||
|
|
||||||
var map_LoadBalancerIngress = map[string]string{
|
var map_LoadBalancerIngress = map[string]string{
|
||||||
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
|
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
|
||||||
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
|
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
|
||||||
@ -1195,6 +1217,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
|
|||||||
return map_NodeDaemonEndpoints
|
return map_NodeDaemonEndpoints
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_NodeFeatures = map[string]string{
|
||||||
|
"": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.",
|
||||||
|
"supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (NodeFeatures) SwaggerDoc() map[string]string {
|
||||||
|
return map_NodeFeatures
|
||||||
|
}
|
||||||
|
|
||||||
var map_NodeList = map[string]string{
|
var map_NodeList = map[string]string{
|
||||||
"": "NodeList is the whole list of all Nodes which have been registered with master.",
|
"": "NodeList is the whole list of all Nodes which have been registered with master.",
|
||||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||||
@ -1225,8 +1256,9 @@ func (NodeRuntimeHandler) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_NodeRuntimeHandlerFeatures = map[string]string{
|
var map_NodeRuntimeHandlerFeatures = map[string]string{
|
||||||
"": "NodeRuntimeHandlerFeatures is a set of runtime features.",
|
"": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.",
|
||||||
"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.",
|
"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.",
|
||||||
|
"userNamespaces": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string {
|
func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string {
|
||||||
@ -1280,7 +1312,7 @@ func (NodeSpec) SwaggerDoc() map[string]string {
|
|||||||
|
|
||||||
var map_NodeStatus = map[string]string{
|
var map_NodeStatus = map[string]string{
|
||||||
"": "NodeStatus is information about the current status of a node.",
|
"": "NodeStatus is information about the current status of a node.",
|
||||||
"capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
|
"capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
|
||||||
"allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
|
"allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
|
||||||
"phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
|
"phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
|
||||||
"conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
|
"conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
|
||||||
@ -1292,6 +1324,7 @@ var map_NodeStatus = map[string]string{
|
|||||||
"volumesAttached": "List of volumes that are attached to the node.",
|
"volumesAttached": "List of volumes that are attached to the node.",
|
||||||
"config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
|
"config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
|
||||||
"runtimeHandlers": "The available runtime handlers.",
|
"runtimeHandlers": "The available runtime handlers.",
|
||||||
|
"features": "Features describes the set of features implemented by the CRI implementation.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (NodeStatus) SwaggerDoc() map[string]string {
|
func (NodeStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1307,7 +1340,7 @@ var map_NodeSystemInfo = map[string]string{
|
|||||||
"osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
|
"osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
|
||||||
"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).",
|
"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).",
|
||||||
"kubeletVersion": "Kubelet Version reported by the node.",
|
"kubeletVersion": "Kubelet Version reported by the node.",
|
||||||
"kubeProxyVersion": "KubeProxy Version reported by the node.",
|
"kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.",
|
||||||
"operatingSystem": "The Operating System reported by the node",
|
"operatingSystem": "The Operating System reported by the node",
|
||||||
"architecture": "The Architecture reported by the node",
|
"architecture": "The Architecture reported by the node",
|
||||||
}
|
}
|
||||||
@ -1395,7 +1428,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
|
|||||||
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
|
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
|
||||||
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
|
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
|
||||||
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
|
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
|
||||||
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.",
|
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
||||||
@ -1410,8 +1443,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
|
|||||||
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
|
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
|
||||||
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||||
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||||
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
|
||||||
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
|
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1488,7 +1521,7 @@ var map_PersistentVolumeSpec = map[string]string{
|
|||||||
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
|
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
|
||||||
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
|
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
|
||||||
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
|
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
|
||||||
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
|
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
|
||||||
@ -1500,7 +1533,7 @@ var map_PersistentVolumeStatus = map[string]string{
|
|||||||
"phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
|
"phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
|
||||||
"message": "message is a human-readable message indicating details about why the volume is in this state.",
|
"message": "message is a human-readable message indicating details about why the volume is in this state.",
|
||||||
"reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
|
"reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
|
||||||
"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).",
|
"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
|
func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1544,8 +1577,8 @@ var map_PodAffinityTerm = map[string]string{
|
|||||||
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
|
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
|
||||||
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
|
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
|
||||||
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
|
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
|
||||||
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
|
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
|
||||||
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
|
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodAffinityTerm) SwaggerDoc() map[string]string {
|
func (PodAffinityTerm) SwaggerDoc() map[string]string {
|
||||||
@ -1696,9 +1729,10 @@ func (PodReadinessGate) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_PodResourceClaim = map[string]string{
|
var map_PodResourceClaim = map[string]string{
|
||||||
"": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
|
"": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
|
||||||
"name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
|
"name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
|
||||||
"source": "Source describes where to find the ResourceClaim.",
|
"resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
|
||||||
|
"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodResourceClaim) SwaggerDoc() map[string]string {
|
func (PodResourceClaim) SwaggerDoc() map[string]string {
|
||||||
@ -1708,7 +1742,7 @@ func (PodResourceClaim) SwaggerDoc() map[string]string {
|
|||||||
var map_PodResourceClaimStatus = map[string]string{
|
var map_PodResourceClaimStatus = map[string]string{
|
||||||
"": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
|
"": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
|
||||||
"name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
|
"name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
|
||||||
"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
|
"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
|
func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1731,7 +1765,8 @@ var map_PodSecurityContext = map[string]string{
|
|||||||
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
"runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
||||||
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.",
|
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
|
"supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
|
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
|
||||||
"sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
|
"sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
|
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
@ -1766,7 +1801,7 @@ var map_PodSpec = map[string]string{
|
|||||||
"serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
|
"serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
|
||||||
"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
|
"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
|
||||||
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
|
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
|
||||||
"nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
|
"nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
|
||||||
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
||||||
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
||||||
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
|
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
|
||||||
@ -1789,7 +1824,7 @@ var map_PodSpec = map[string]string{
|
|||||||
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
|
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
|
||||||
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
|
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
|
||||||
"setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
|
"setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
|
||||||
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
|
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
|
||||||
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
|
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
|
||||||
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
|
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
|
||||||
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
|
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
|
||||||
@ -1943,7 +1978,7 @@ func (ProbeHandler) SwaggerDoc() map[string]string {
|
|||||||
|
|
||||||
var map_ProjectedVolumeSource = map[string]string{
|
var map_ProjectedVolumeSource = map[string]string{
|
||||||
"": "Represents a projected volume source",
|
"": "Represents a projected volume source",
|
||||||
"sources": "sources is the list of volume projections",
|
"sources": "sources is the list of volume projections. Each entry in this list handles one source.",
|
||||||
"defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
|
"defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2071,6 +2106,7 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
|
|||||||
var map_ResourceClaim = map[string]string{
|
var map_ResourceClaim = map[string]string{
|
||||||
"": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
|
"": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
|
||||||
"name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
|
"name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
|
||||||
|
"request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ResourceClaim) SwaggerDoc() map[string]string {
|
func (ResourceClaim) SwaggerDoc() map[string]string {
|
||||||
@ -2088,6 +2124,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string {
|
|||||||
return map_ResourceFieldSelector
|
return map_ResourceFieldSelector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ResourceHealth = map[string]string{
|
||||||
|
"": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.",
|
||||||
|
"resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.",
|
||||||
|
"health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ResourceHealth) SwaggerDoc() map[string]string {
|
||||||
|
return map_ResourceHealth
|
||||||
|
}
|
||||||
|
|
||||||
var map_ResourceQuota = map[string]string{
|
var map_ResourceQuota = map[string]string{
|
||||||
"": "ResourceQuota sets aggregate quota restrictions enforced per namespace",
|
"": "ResourceQuota sets aggregate quota restrictions enforced per namespace",
|
||||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||||
@ -2141,6 +2187,15 @@ func (ResourceRequirements) SwaggerDoc() map[string]string {
|
|||||||
return map_ResourceRequirements
|
return map_ResourceRequirements
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ResourceStatus = map[string]string{
|
||||||
|
"name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.",
|
||||||
|
"resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ResourceStatus) SwaggerDoc() map[string]string {
|
||||||
|
return map_ResourceStatus
|
||||||
|
}
|
||||||
|
|
||||||
var map_SELinuxOptions = map[string]string{
|
var map_SELinuxOptions = map[string]string{
|
||||||
"": "SELinuxOptions are the labels to be applied to the container",
|
"": "SELinuxOptions are the labels to be applied to the container",
|
||||||
"user": "User is a SELinux user label that applies to the container.",
|
"user": "User is a SELinux user label that applies to the container.",
|
||||||
@ -2304,7 +2359,7 @@ var map_SecurityContext = map[string]string{
|
|||||||
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
||||||
"readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.",
|
"readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.",
|
"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
|
"procMount": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.",
|
"seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.",
|
"appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
}
|
}
|
||||||
@ -2639,7 +2694,7 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_VolumeProjection = map[string]string{
|
var map_VolumeProjection = map[string]string{
|
||||||
"": "Projection that may be projected along with other supported volume types",
|
"": "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.",
|
||||||
"secret": "secret information about the secret data to project",
|
"secret": "secret information about the secret data to project",
|
||||||
"downwardAPI": "downwardAPI information about the downwardAPI data to project",
|
"downwardAPI": "downwardAPI information about the downwardAPI data to project",
|
||||||
"configMap": "configMap information about the configMap data to project",
|
"configMap": "configMap information about the configMap data to project",
|
||||||
@ -2692,6 +2747,7 @@ var map_VolumeSource = map[string]string{
|
|||||||
"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
|
"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
|
||||||
"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
|
"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
|
||||||
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
|
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
|
||||||
|
"image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (VolumeSource) SwaggerDoc() map[string]string {
|
func (VolumeSource) SwaggerDoc() map[string]string {
|
||||||
|
185
api/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
generated
vendored
185
api/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
generated
vendored
@ -440,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
|
|
||||||
*out = *in
|
|
||||||
if in.ResourceClaimName != nil {
|
|
||||||
in, out := &in.ResourceClaimName, &out.ResourceClaimName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.ResourceClaimTemplateName != nil {
|
|
||||||
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
|
|
||||||
func (in *ClaimSource) DeepCopy() *ClaimSource {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(ClaimSource)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
|
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -1069,6 +1043,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.User != nil {
|
||||||
|
in, out := &in.User, &out.User
|
||||||
|
*out = new(ContainerUser)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.AllocatedResourcesStatus != nil {
|
||||||
|
in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus
|
||||||
|
*out = make([]ResourceStatus, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1082,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ContainerUser) DeepCopyInto(out *ContainerUser) {
|
||||||
|
*out = *in
|
||||||
|
if in.Linux != nil {
|
||||||
|
in, out := &in.Linux, &out.Linux
|
||||||
|
*out = new(LinuxContainerUser)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser.
|
||||||
|
func (in *ContainerUser) DeepCopy() *ContainerUser {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ContainerUser)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
|
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2044,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource.
|
||||||
|
func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ImageVolumeSource)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
|
func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2261,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) {
|
||||||
|
*out = *in
|
||||||
|
if in.SupplementalGroups != nil {
|
||||||
|
in, out := &in.SupplementalGroups, &out.SupplementalGroups
|
||||||
|
*out = make([]int64, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser.
|
||||||
|
func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(LinuxContainerUser)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *List) DeepCopyInto(out *List) {
|
func (in *List) DeepCopyInto(out *List) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2695,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) {
|
||||||
|
*out = *in
|
||||||
|
if in.SupplementalGroupsPolicy != nil {
|
||||||
|
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures.
|
||||||
|
func (in *NodeFeatures) DeepCopy() *NodeFeatures {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(NodeFeatures)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *NodeList) DeepCopyInto(out *NodeList) {
|
func (in *NodeList) DeepCopyInto(out *NodeList) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2782,6 +2847,11 @@ func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatur
|
|||||||
*out = new(bool)
|
*out = new(bool)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
|
if in.UserNamespaces != nil {
|
||||||
|
in, out := &in.UserNamespaces, &out.UserNamespaces
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2962,6 +3032,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.Features != nil {
|
||||||
|
in, out := &in.Features, &out.Features
|
||||||
|
*out = new(NodeFeatures)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3971,7 +4046,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
|
|||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
|
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
|
||||||
*out = *in
|
*out = *in
|
||||||
in.Source.DeepCopyInto(&out.Source)
|
if in.ResourceClaimName != nil {
|
||||||
|
in, out := &in.ResourceClaimName, &out.ResourceClaimName
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.ResourceClaimTemplateName != nil {
|
||||||
|
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4055,6 +4139,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
|
|||||||
*out = make([]int64, len(*in))
|
*out = make([]int64, len(*in))
|
||||||
copy(*out, *in)
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
|
if in.SupplementalGroupsPolicy != nil {
|
||||||
|
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
|
||||||
|
*out = new(SupplementalGroupsPolicy)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
if in.FSGroup != nil {
|
if in.FSGroup != nil {
|
||||||
in, out := &in.FSGroup, &out.FSGroup
|
in, out := &in.FSGroup, &out.FSGroup
|
||||||
*out = new(int64)
|
*out = new(int64)
|
||||||
@ -4900,6 +4989,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth.
|
||||||
|
func (in *ResourceHealth) DeepCopy() *ResourceHealth {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ResourceHealth)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in ResourceList) DeepCopyInto(out *ResourceList) {
|
func (in ResourceList) DeepCopyInto(out *ResourceList) {
|
||||||
{
|
{
|
||||||
@ -5081,6 +5186,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.Resources != nil {
|
||||||
|
in, out := &in.Resources, &out.Resources
|
||||||
|
*out = make([]ResourceHealth, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
|
||||||
|
func (in *ResourceStatus) DeepCopy() *ResourceStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ResourceStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
|
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -6426,6 +6552,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
|
|||||||
*out = new(EphemeralVolumeSource)
|
*out = new(EphemeralVolumeSource)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
|
if in.Image != nil {
|
||||||
|
in, out := &in.Image, &out.Image
|
||||||
|
*out = new(ImageVolumeSource)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
274
api/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
274
api/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
@ -0,0 +1,274 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Binding) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Endpoints) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Event) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *EventList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *LimitRange) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *List) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Namespace) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Node) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *NodeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Pod) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 6
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Secret) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *SecretList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Service) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
2
api/vendor/k8s.io/api/rbac/v1/doc.go
generated
vendored
2
api/vendor/k8s.io/api/rbac/v1/doc.go
generated
vendored
@ -17,7 +17,7 @@ limitations under the License.
|
|||||||
// +k8s:deepcopy-gen=package
|
// +k8s:deepcopy-gen=package
|
||||||
// +k8s:protobuf-gen=package
|
// +k8s:protobuf-gen=package
|
||||||
// +k8s:openapi-gen=true
|
// +k8s:openapi-gen=true
|
||||||
|
// +k8s:prerelease-lifecycle-gen=true
|
||||||
// +groupName=rbac.authorization.k8s.io
|
// +groupName=rbac.authorization.k8s.io
|
||||||
|
|
||||||
package v1 // import "k8s.io/api/rbac/v1"
|
package v1 // import "k8s.io/api/rbac/v1"
|
||||||
|
18
api/vendor/k8s.io/api/rbac/v1/generated.proto
generated
vendored
18
api/vendor/k8s.io/api/rbac/v1/generated.proto
generated
vendored
@ -34,14 +34,14 @@ message AggregationRule {
|
|||||||
// If any of the selectors match, then the ClusterRole's permissions will be added
|
// If any of the selectors match, then the ClusterRole's permissions will be added
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
|
repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
||||||
message ClusterRole {
|
message ClusterRole {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// Rules holds all the PolicyRules for this ClusterRole
|
// Rules holds all the PolicyRules for this ClusterRole
|
||||||
// +optional
|
// +optional
|
||||||
@ -60,7 +60,7 @@ message ClusterRole {
|
|||||||
message ClusterRoleBinding {
|
message ClusterRoleBinding {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// Subjects holds references to the objects the role applies to.
|
// Subjects holds references to the objects the role applies to.
|
||||||
// +optional
|
// +optional
|
||||||
@ -77,7 +77,7 @@ message ClusterRoleBinding {
|
|||||||
message ClusterRoleBindingList {
|
message ClusterRoleBindingList {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// Items is a list of ClusterRoleBindings
|
// Items is a list of ClusterRoleBindings
|
||||||
repeated ClusterRoleBinding items = 2;
|
repeated ClusterRoleBinding items = 2;
|
||||||
@ -87,7 +87,7 @@ message ClusterRoleBindingList {
|
|||||||
message ClusterRoleList {
|
message ClusterRoleList {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// Items is a list of ClusterRoles
|
// Items is a list of ClusterRoles
|
||||||
repeated ClusterRole items = 2;
|
repeated ClusterRole items = 2;
|
||||||
@ -128,7 +128,7 @@ message PolicyRule {
|
|||||||
message Role {
|
message Role {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// Rules holds all the PolicyRules for this Role
|
// Rules holds all the PolicyRules for this Role
|
||||||
// +optional
|
// +optional
|
||||||
@ -142,7 +142,7 @@ message Role {
|
|||||||
message RoleBinding {
|
message RoleBinding {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// Subjects holds references to the objects the role applies to.
|
// Subjects holds references to the objects the role applies to.
|
||||||
// +optional
|
// +optional
|
||||||
@ -159,7 +159,7 @@ message RoleBinding {
|
|||||||
message RoleBindingList {
|
message RoleBindingList {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// Items is a list of RoleBindings
|
// Items is a list of RoleBindings
|
||||||
repeated RoleBinding items = 2;
|
repeated RoleBinding items = 2;
|
||||||
@ -169,7 +169,7 @@ message RoleBindingList {
|
|||||||
message RoleList {
|
message RoleList {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// Items is a list of Roles
|
// Items is a list of Roles
|
||||||
repeated Role items = 2;
|
repeated Role items = 2;
|
||||||
|
10
api/vendor/k8s.io/api/rbac/v1/types.go
generated
vendored
10
api/vendor/k8s.io/api/rbac/v1/types.go
generated
vendored
@ -84,7 +84,7 @@ type Subject struct {
|
|||||||
// Defaults to "" for ServiceAccount subjects.
|
// Defaults to "" for ServiceAccount subjects.
|
||||||
// Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
|
// Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
|
||||||
// +optional
|
// +optional
|
||||||
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"`
|
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
|
||||||
// Name of the object being referenced.
|
// Name of the object being referenced.
|
||||||
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
|
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
|
||||||
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
|
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
|
||||||
@ -106,6 +106,7 @@ type RoleRef struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
|
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
|
||||||
type Role struct {
|
type Role struct {
|
||||||
@ -122,6 +123,7 @@ type Role struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
|
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
|
||||||
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
|
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
|
||||||
@ -144,6 +146,7 @@ type RoleBinding struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// RoleBindingList is a collection of RoleBindings
|
// RoleBindingList is a collection of RoleBindings
|
||||||
type RoleBindingList struct {
|
type RoleBindingList struct {
|
||||||
@ -157,6 +160,7 @@ type RoleBindingList struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// RoleList is a collection of Roles
|
// RoleList is a collection of Roles
|
||||||
type RoleList struct {
|
type RoleList struct {
|
||||||
@ -172,6 +176,7 @@ type RoleList struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
||||||
type ClusterRole struct {
|
type ClusterRole struct {
|
||||||
@ -204,6 +209,7 @@ type AggregationRule struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
|
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
|
||||||
// and adds who information via Subject.
|
// and adds who information via Subject.
|
||||||
@ -225,6 +231,7 @@ type ClusterRoleBinding struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings
|
// ClusterRoleBindingList is a collection of ClusterRoleBindings
|
||||||
type ClusterRoleBindingList struct {
|
type ClusterRoleBindingList struct {
|
||||||
@ -238,6 +245,7 @@ type ClusterRoleBindingList struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||||
|
|
||||||
// ClusterRoleList is a collection of ClusterRoles
|
// ClusterRoleList is a collection of ClusterRoles
|
||||||
type ClusterRoleList struct {
|
type ClusterRoleList struct {
|
||||||
|
70
api/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
70
api/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ClusterRole) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ClusterRoleBinding) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ClusterRoleBindingList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ClusterRoleList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Role) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *RoleBinding) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *RoleBindingList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *RoleList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 8
|
||||||
|
}
|
1
api/vendor/k8s.io/api/storage/v1/doc.go
generated
vendored
1
api/vendor/k8s.io/api/storage/v1/doc.go
generated
vendored
@ -18,5 +18,6 @@ limitations under the License.
|
|||||||
// +k8s:protobuf-gen=package
|
// +k8s:protobuf-gen=package
|
||||||
// +groupName=storage.k8s.io
|
// +groupName=storage.k8s.io
|
||||||
// +k8s:openapi-gen=true
|
// +k8s:openapi-gen=true
|
||||||
|
// +k8s:prerelease-lifecycle-gen=true
|
||||||
|
|
||||||
package v1 // import "k8s.io/api/storage/v1"
|
package v1 // import "k8s.io/api/storage/v1"
|
||||||
|
32
api/vendor/k8s.io/api/storage/v1/generated.proto
generated
vendored
32
api/vendor/k8s.io/api/storage/v1/generated.proto
generated
vendored
@ -44,7 +44,7 @@ message CSIDriver {
|
|||||||
// an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
|
// an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
|
||||||
// alphanumerics between.
|
// alphanumerics between.
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// spec represents the specification of the CSI Driver.
|
// spec represents the specification of the CSI Driver.
|
||||||
optional CSIDriverSpec spec = 2;
|
optional CSIDriverSpec spec = 2;
|
||||||
@ -55,7 +55,7 @@ message CSIDriverList {
|
|||||||
// Standard list metadata
|
// Standard list metadata
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// items is the list of CSIDriver
|
// items is the list of CSIDriver
|
||||||
repeated CSIDriver items = 2;
|
repeated CSIDriver items = 2;
|
||||||
@ -226,7 +226,7 @@ message CSIDriverSpec {
|
|||||||
message CSINode {
|
message CSINode {
|
||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// metadata.name must be the Kubernetes node name.
|
// metadata.name must be the Kubernetes node name.
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// spec is the specification of CSINode
|
// spec is the specification of CSINode
|
||||||
optional CSINodeSpec spec = 2;
|
optional CSINodeSpec spec = 2;
|
||||||
@ -275,7 +275,7 @@ message CSINodeList {
|
|||||||
// Standard list metadata
|
// Standard list metadata
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// items is the list of CSINode
|
// items is the list of CSINode
|
||||||
repeated CSINode items = 2;
|
repeated CSINode items = 2;
|
||||||
@ -327,7 +327,7 @@ message CSIStorageCapacity {
|
|||||||
//
|
//
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// nodeTopology defines which nodes have access to the storage
|
// nodeTopology defines which nodes have access to the storage
|
||||||
// for which capacity was reported. If not set, the storage is
|
// for which capacity was reported. If not set, the storage is
|
||||||
@ -336,7 +336,7 @@ message CSIStorageCapacity {
|
|||||||
// immutable.
|
// immutable.
|
||||||
//
|
//
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
|
||||||
|
|
||||||
// storageClassName represents the name of the StorageClass that the reported capacity applies to.
|
// storageClassName represents the name of the StorageClass that the reported capacity applies to.
|
||||||
// It must meet the same requirements as the name of a StorageClass
|
// It must meet the same requirements as the name of a StorageClass
|
||||||
@ -356,7 +356,7 @@ message CSIStorageCapacity {
|
|||||||
// unavailable.
|
// unavailable.
|
||||||
//
|
//
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
|
optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
|
||||||
|
|
||||||
// maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
|
// maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
|
||||||
// for a GetCapacityRequest with topology and parameters that match the
|
// for a GetCapacityRequest with topology and parameters that match the
|
||||||
@ -370,7 +370,7 @@ message CSIStorageCapacity {
|
|||||||
// API is ResourceRequirements.Requests in a volume claim.
|
// API is ResourceRequirements.Requests in a volume claim.
|
||||||
//
|
//
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
|
optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
// CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
|
// CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
|
||||||
@ -378,7 +378,7 @@ message CSIStorageCapacityList {
|
|||||||
// Standard list metadata
|
// Standard list metadata
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// items is the list of CSIStorageCapacity objects.
|
// items is the list of CSIStorageCapacity objects.
|
||||||
repeated CSIStorageCapacity items = 2;
|
repeated CSIStorageCapacity items = 2;
|
||||||
@ -393,7 +393,7 @@ message StorageClass {
|
|||||||
// Standard object's metadata.
|
// Standard object's metadata.
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// provisioner indicates the type of the provisioner.
|
// provisioner indicates the type of the provisioner.
|
||||||
optional string provisioner = 2;
|
optional string provisioner = 2;
|
||||||
@ -431,7 +431,7 @@ message StorageClass {
|
|||||||
// This field is only honored by servers that enable the VolumeScheduling feature.
|
// This field is only honored by servers that enable the VolumeScheduling feature.
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
|
repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
// StorageClassList is a collection of storage classes.
|
// StorageClassList is a collection of storage classes.
|
||||||
@ -439,7 +439,7 @@ message StorageClassList {
|
|||||||
// Standard list metadata
|
// Standard list metadata
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// items is the list of StorageClasses
|
// items is the list of StorageClasses
|
||||||
repeated StorageClass items = 2;
|
repeated StorageClass items = 2;
|
||||||
@ -466,7 +466,7 @@ message VolumeAttachment {
|
|||||||
// Standard object metadata.
|
// Standard object metadata.
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
// spec represents specification of the desired attach/detach volume behavior.
|
// spec represents specification of the desired attach/detach volume behavior.
|
||||||
// Populated by the Kubernetes system.
|
// Populated by the Kubernetes system.
|
||||||
@ -484,7 +484,7 @@ message VolumeAttachmentList {
|
|||||||
// Standard list metadata
|
// Standard list metadata
|
||||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
// items is the list of VolumeAttachments
|
// items is the list of VolumeAttachments
|
||||||
repeated VolumeAttachment items = 2;
|
repeated VolumeAttachment items = 2;
|
||||||
@ -506,7 +506,7 @@ message VolumeAttachmentSource {
|
|||||||
// PersistentVolumeSpec. This field is beta-level and is only
|
// PersistentVolumeSpec. This field is beta-level and is only
|
||||||
// honored by servers that enabled the CSIMigration feature.
|
// honored by servers that enabled the CSIMigration feature.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
|
optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
|
// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
|
||||||
@ -554,7 +554,7 @@ message VolumeAttachmentStatus {
|
|||||||
message VolumeError {
|
message VolumeError {
|
||||||
// time represents the time the error was encountered.
|
// time represents the time the error was encountered.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
|
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
|
||||||
|
|
||||||
// message represents the error encountered during Attach or Detach operation.
|
// message represents the error encountered during Attach or Detach operation.
|
||||||
// This string may be logged, so it should not contain sensitive
|
// This string may be logged, so it should not contain sensitive
|
||||||
|
10
api/vendor/k8s.io/api/storage/v1/types.go
generated
vendored
10
api/vendor/k8s.io/api/storage/v1/types.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||||
|
|
||||||
// StorageClass describes the parameters for a class of storage for
|
// StorageClass describes the parameters for a class of storage for
|
||||||
// which PersistentVolumes can be dynamically provisioned.
|
// which PersistentVolumes can be dynamically provisioned.
|
||||||
@ -79,6 +80,7 @@ type StorageClass struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||||
|
|
||||||
// StorageClassList is a collection of storage classes.
|
// StorageClassList is a collection of storage classes.
|
||||||
type StorageClassList struct {
|
type StorageClassList struct {
|
||||||
@ -112,6 +114,7 @@ const (
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.13
|
||||||
|
|
||||||
// VolumeAttachment captures the intent to attach or detach the specified volume
|
// VolumeAttachment captures the intent to attach or detach the specified volume
|
||||||
// to/from the specified node.
|
// to/from the specified node.
|
||||||
@ -137,6 +140,7 @@ type VolumeAttachment struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.13
|
||||||
|
|
||||||
// VolumeAttachmentList is a collection of VolumeAttachment objects.
|
// VolumeAttachmentList is a collection of VolumeAttachment objects.
|
||||||
type VolumeAttachmentList struct {
|
type VolumeAttachmentList struct {
|
||||||
@ -227,6 +231,7 @@ type VolumeError struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.18
|
||||||
|
|
||||||
// CSIDriver captures information about a Container Storage Interface (CSI)
|
// CSIDriver captures information about a Container Storage Interface (CSI)
|
||||||
// volume driver deployed on the cluster.
|
// volume driver deployed on the cluster.
|
||||||
@ -251,6 +256,7 @@ type CSIDriver struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.18
|
||||||
|
|
||||||
// CSIDriverList is a collection of CSIDriver objects.
|
// CSIDriverList is a collection of CSIDriver objects.
|
||||||
type CSIDriverList struct {
|
type CSIDriverList struct {
|
||||||
@ -491,6 +497,7 @@ const (
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.17
|
||||||
|
|
||||||
// CSINode holds information about all CSI drivers installed on a node.
|
// CSINode holds information about all CSI drivers installed on a node.
|
||||||
// CSI drivers do not need to create the CSINode object directly. As long as
|
// CSI drivers do not need to create the CSINode object directly. As long as
|
||||||
@ -572,6 +579,7 @@ type VolumeNodeResources struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.17
|
||||||
|
|
||||||
// CSINodeList is a collection of CSINode objects.
|
// CSINodeList is a collection of CSINode objects.
|
||||||
type CSINodeList struct {
|
type CSINodeList struct {
|
||||||
@ -588,6 +596,7 @@ type CSINodeList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.24
|
||||||
|
|
||||||
// CSIStorageCapacity stores the result of one CSI GetCapacity call.
|
// CSIStorageCapacity stores the result of one CSI GetCapacity call.
|
||||||
// For a given StorageClass, this describes the available capacity in a
|
// For a given StorageClass, this describes the available capacity in a
|
||||||
@ -673,6 +682,7 @@ type CSIStorageCapacity struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.24
|
||||||
|
|
||||||
// CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
|
// CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
|
||||||
type CSIStorageCapacityList struct {
|
type CSIStorageCapacityList struct {
|
||||||
|
82
api/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
82
api/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *CSIDriver) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 18
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *CSIDriverList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 18
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *CSINode) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 17
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *CSINodeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 17
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *CSIStorageCapacity) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 24
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *CSIStorageCapacityList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 24
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *StorageClass) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 6
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *StorageClassList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 6
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *VolumeAttachment) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 13
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *VolumeAttachmentList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 13
|
||||||
|
}
|
29
api/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
generated
vendored
29
api/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
generated
vendored
@ -25,6 +25,8 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
|
|
||||||
inf "gopkg.in/inf.v0"
|
inf "gopkg.in/inf.v0"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -683,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (q Quantity) MarshalCBOR() ([]byte, error) {
|
||||||
|
// The call to String() should never return the string "<nil>" because the receiver's
|
||||||
|
// address will never be nil.
|
||||||
|
return cbor.Marshal(q.String())
|
||||||
|
}
|
||||||
|
|
||||||
// ToUnstructured implements the value.UnstructuredConverter interface.
|
// ToUnstructured implements the value.UnstructuredConverter interface.
|
||||||
func (q Quantity) ToUnstructured() interface{} {
|
func (q Quantity) ToUnstructured() interface{} {
|
||||||
return q.String()
|
return q.String()
|
||||||
@ -711,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (q *Quantity) UnmarshalCBOR(value []byte) error {
|
||||||
|
var s *string
|
||||||
|
if err := cbor.Unmarshal(value, &s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if s == nil {
|
||||||
|
q.d.Dec = nil
|
||||||
|
q.i = int64Amount{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed, err := ParseQuantity(strings.TrimSpace(*s))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*q = parsed
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// NewDecimalQuantity returns a new Quantity representing the given
|
// NewDecimalQuantity returns a new Quantity representing the given
|
||||||
// value in the given format.
|
// value in the given format.
|
||||||
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
|
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
|
||||||
|
13
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
generated
vendored
13
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
generated
vendored
@ -18,6 +18,7 @@ package v1
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/utils/ptr"
|
||||||
)
|
)
|
||||||
|
|
||||||
// IsControlledBy checks if the object has a controllerRef set to the given owner
|
// IsControlledBy checks if the object has a controllerRef set to the given owner
|
||||||
@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cp := *ref
|
cp := *ref
|
||||||
|
cp.Controller = ptr.To(*ref.Controller)
|
||||||
|
if ref.BlockOwnerDeletion != nil {
|
||||||
|
cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion)
|
||||||
|
}
|
||||||
return &cp
|
return &cp
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetControllerOf returns a pointer to the controllerRef if controllee has a controller
|
// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
|
||||||
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
|
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
|
||||||
refs := controllee.GetOwnerReferences()
|
refs := controllee.GetOwnerReferences()
|
||||||
for i := range refs {
|
for i := range refs {
|
||||||
@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference {
|
|||||||
|
|
||||||
// NewControllerRef creates an OwnerReference pointing to the given owner.
|
// NewControllerRef creates an OwnerReference pointing to the given owner.
|
||||||
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
|
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
|
||||||
blockOwnerDeletion := true
|
|
||||||
isController := true
|
|
||||||
return &OwnerReference{
|
return &OwnerReference{
|
||||||
APIVersion: gvk.GroupVersion().String(),
|
APIVersion: gvk.GroupVersion().String(),
|
||||||
Kind: gvk.Kind,
|
Kind: gvk.Kind,
|
||||||
Name: owner.GetName(),
|
Name: owner.GetName(),
|
||||||
UID: owner.GetUID(),
|
UID: owner.GetUID(),
|
||||||
BlockOwnerDeletion: &blockOwnerDeletion,
|
BlockOwnerDeletion: ptr.To(true),
|
||||||
Controller: &isController,
|
Controller: ptr.To(true),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
677
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
generated
vendored
677
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
generated
vendored
@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() {
|
|||||||
|
|
||||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} }
|
||||||
|
func (*FieldSelectorRequirement) ProtoMessage() {}
|
||||||
|
func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_a8431b6e0aeeb761, []int{10}
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_FieldSelectorRequirement.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *FieldsV1) Reset() { *m = FieldsV1{} }
|
func (m *FieldsV1) Reset() { *m = FieldsV1{} }
|
||||||
func (*FieldsV1) ProtoMessage() {}
|
func (*FieldsV1) ProtoMessage() {}
|
||||||
func (*FieldsV1) Descriptor() ([]byte, []int) {
|
func (*FieldsV1) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{10}
|
return fileDescriptor_a8431b6e0aeeb761, []int{11}
|
||||||
}
|
}
|
||||||
func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
|
func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
|
|||||||
func (m *GetOptions) Reset() { *m = GetOptions{} }
|
func (m *GetOptions) Reset() { *m = GetOptions{} }
|
||||||
func (*GetOptions) ProtoMessage() {}
|
func (*GetOptions) ProtoMessage() {}
|
||||||
func (*GetOptions) Descriptor() ([]byte, []int) {
|
func (*GetOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{11}
|
return fileDescriptor_a8431b6e0aeeb761, []int{12}
|
||||||
}
|
}
|
||||||
func (m *GetOptions) XXX_Unmarshal(b []byte) error {
|
func (m *GetOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo
|
|||||||
func (m *GroupKind) Reset() { *m = GroupKind{} }
|
func (m *GroupKind) Reset() { *m = GroupKind{} }
|
||||||
func (*GroupKind) ProtoMessage() {}
|
func (*GroupKind) ProtoMessage() {}
|
||||||
func (*GroupKind) Descriptor() ([]byte, []int) {
|
func (*GroupKind) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{12}
|
return fileDescriptor_a8431b6e0aeeb761, []int{13}
|
||||||
}
|
}
|
||||||
func (m *GroupKind) XXX_Unmarshal(b []byte) error {
|
func (m *GroupKind) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo
|
|||||||
func (m *GroupResource) Reset() { *m = GroupResource{} }
|
func (m *GroupResource) Reset() { *m = GroupResource{} }
|
||||||
func (*GroupResource) ProtoMessage() {}
|
func (*GroupResource) ProtoMessage() {}
|
||||||
func (*GroupResource) Descriptor() ([]byte, []int) {
|
func (*GroupResource) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{13}
|
return fileDescriptor_a8431b6e0aeeb761, []int{14}
|
||||||
}
|
}
|
||||||
func (m *GroupResource) XXX_Unmarshal(b []byte) error {
|
func (m *GroupResource) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo
|
|||||||
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
|
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
|
||||||
func (*GroupVersion) ProtoMessage() {}
|
func (*GroupVersion) ProtoMessage() {}
|
||||||
func (*GroupVersion) Descriptor() ([]byte, []int) {
|
func (*GroupVersion) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{14}
|
return fileDescriptor_a8431b6e0aeeb761, []int{15}
|
||||||
}
|
}
|
||||||
func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
|
|||||||
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
|
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
|
||||||
func (*GroupVersionForDiscovery) ProtoMessage() {}
|
func (*GroupVersionForDiscovery) ProtoMessage() {}
|
||||||
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
|
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{15}
|
return fileDescriptor_a8431b6e0aeeb761, []int{16}
|
||||||
}
|
}
|
||||||
func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
|
|||||||
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
|
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
|
||||||
func (*GroupVersionKind) ProtoMessage() {}
|
func (*GroupVersionKind) ProtoMessage() {}
|
||||||
func (*GroupVersionKind) Descriptor() ([]byte, []int) {
|
func (*GroupVersionKind) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{16}
|
return fileDescriptor_a8431b6e0aeeb761, []int{17}
|
||||||
}
|
}
|
||||||
func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
|
|||||||
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
|
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
|
||||||
func (*GroupVersionResource) ProtoMessage() {}
|
func (*GroupVersionResource) ProtoMessage() {}
|
||||||
func (*GroupVersionResource) Descriptor() ([]byte, []int) {
|
func (*GroupVersionResource) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{17}
|
return fileDescriptor_a8431b6e0aeeb761, []int{18}
|
||||||
}
|
}
|
||||||
func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
|
|||||||
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
|
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
|
||||||
func (*LabelSelector) ProtoMessage() {}
|
func (*LabelSelector) ProtoMessage() {}
|
||||||
func (*LabelSelector) Descriptor() ([]byte, []int) {
|
func (*LabelSelector) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{18}
|
return fileDescriptor_a8431b6e0aeeb761, []int{19}
|
||||||
}
|
}
|
||||||
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
|
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
|
|||||||
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
|
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
|
||||||
func (*LabelSelectorRequirement) ProtoMessage() {}
|
func (*LabelSelectorRequirement) ProtoMessage() {}
|
||||||
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
|
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{19}
|
return fileDescriptor_a8431b6e0aeeb761, []int{20}
|
||||||
}
|
}
|
||||||
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
|
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
|
|||||||
func (m *List) Reset() { *m = List{} }
|
func (m *List) Reset() { *m = List{} }
|
||||||
func (*List) ProtoMessage() {}
|
func (*List) ProtoMessage() {}
|
||||||
func (*List) Descriptor() ([]byte, []int) {
|
func (*List) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{20}
|
return fileDescriptor_a8431b6e0aeeb761, []int{21}
|
||||||
}
|
}
|
||||||
func (m *List) XXX_Unmarshal(b []byte) error {
|
func (m *List) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
|
|||||||
func (m *ListMeta) Reset() { *m = ListMeta{} }
|
func (m *ListMeta) Reset() { *m = ListMeta{} }
|
||||||
func (*ListMeta) ProtoMessage() {}
|
func (*ListMeta) ProtoMessage() {}
|
||||||
func (*ListMeta) Descriptor() ([]byte, []int) {
|
func (*ListMeta) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{21}
|
return fileDescriptor_a8431b6e0aeeb761, []int{22}
|
||||||
}
|
}
|
||||||
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
|
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo
|
|||||||
func (m *ListOptions) Reset() { *m = ListOptions{} }
|
func (m *ListOptions) Reset() { *m = ListOptions{} }
|
||||||
func (*ListOptions) ProtoMessage() {}
|
func (*ListOptions) ProtoMessage() {}
|
||||||
func (*ListOptions) Descriptor() ([]byte, []int) {
|
func (*ListOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{22}
|
return fileDescriptor_a8431b6e0aeeb761, []int{23}
|
||||||
}
|
}
|
||||||
func (m *ListOptions) XXX_Unmarshal(b []byte) error {
|
func (m *ListOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo
|
|||||||
func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
|
func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
|
||||||
func (*ManagedFieldsEntry) ProtoMessage() {}
|
func (*ManagedFieldsEntry) ProtoMessage() {}
|
||||||
func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
|
func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{23}
|
return fileDescriptor_a8431b6e0aeeb761, []int{24}
|
||||||
}
|
}
|
||||||
func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
|
func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
|
|||||||
func (m *MicroTime) Reset() { *m = MicroTime{} }
|
func (m *MicroTime) Reset() { *m = MicroTime{} }
|
||||||
func (*MicroTime) ProtoMessage() {}
|
func (*MicroTime) ProtoMessage() {}
|
||||||
func (*MicroTime) Descriptor() ([]byte, []int) {
|
func (*MicroTime) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{24}
|
return fileDescriptor_a8431b6e0aeeb761, []int{25}
|
||||||
}
|
}
|
||||||
func (m *MicroTime) XXX_Unmarshal(b []byte) error {
|
func (m *MicroTime) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_MicroTime.Unmarshal(m, b)
|
return xxx_messageInfo_MicroTime.Unmarshal(m, b)
|
||||||
@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo
|
|||||||
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
|
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
|
||||||
func (*ObjectMeta) ProtoMessage() {}
|
func (*ObjectMeta) ProtoMessage() {}
|
||||||
func (*ObjectMeta) Descriptor() ([]byte, []int) {
|
func (*ObjectMeta) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{25}
|
return fileDescriptor_a8431b6e0aeeb761, []int{26}
|
||||||
}
|
}
|
||||||
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
|
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
|
|||||||
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
|
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
|
||||||
func (*OwnerReference) ProtoMessage() {}
|
func (*OwnerReference) ProtoMessage() {}
|
||||||
func (*OwnerReference) Descriptor() ([]byte, []int) {
|
func (*OwnerReference) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{26}
|
return fileDescriptor_a8431b6e0aeeb761, []int{27}
|
||||||
}
|
}
|
||||||
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
|
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
|
|||||||
func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
|
func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
|
||||||
func (*PartialObjectMetadata) ProtoMessage() {}
|
func (*PartialObjectMetadata) ProtoMessage() {}
|
||||||
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
|
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{27}
|
return fileDescriptor_a8431b6e0aeeb761, []int{28}
|
||||||
}
|
}
|
||||||
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
|
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
|
|||||||
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
|
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
|
||||||
func (*PartialObjectMetadataList) ProtoMessage() {}
|
func (*PartialObjectMetadataList) ProtoMessage() {}
|
||||||
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
|
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{28}
|
return fileDescriptor_a8431b6e0aeeb761, []int{29}
|
||||||
}
|
}
|
||||||
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
|
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
|
|||||||
func (m *Patch) Reset() { *m = Patch{} }
|
func (m *Patch) Reset() { *m = Patch{} }
|
||||||
func (*Patch) ProtoMessage() {}
|
func (*Patch) ProtoMessage() {}
|
||||||
func (*Patch) Descriptor() ([]byte, []int) {
|
func (*Patch) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{29}
|
return fileDescriptor_a8431b6e0aeeb761, []int{30}
|
||||||
}
|
}
|
||||||
func (m *Patch) XXX_Unmarshal(b []byte) error {
|
func (m *Patch) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo
|
|||||||
func (m *PatchOptions) Reset() { *m = PatchOptions{} }
|
func (m *PatchOptions) Reset() { *m = PatchOptions{} }
|
||||||
func (*PatchOptions) ProtoMessage() {}
|
func (*PatchOptions) ProtoMessage() {}
|
||||||
func (*PatchOptions) Descriptor() ([]byte, []int) {
|
func (*PatchOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{30}
|
return fileDescriptor_a8431b6e0aeeb761, []int{31}
|
||||||
}
|
}
|
||||||
func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
|
func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
|
|||||||
func (m *Preconditions) Reset() { *m = Preconditions{} }
|
func (m *Preconditions) Reset() { *m = Preconditions{} }
|
||||||
func (*Preconditions) ProtoMessage() {}
|
func (*Preconditions) ProtoMessage() {}
|
||||||
func (*Preconditions) Descriptor() ([]byte, []int) {
|
func (*Preconditions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{31}
|
return fileDescriptor_a8431b6e0aeeb761, []int{32}
|
||||||
}
|
}
|
||||||
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
|
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
|
|||||||
func (m *RootPaths) Reset() { *m = RootPaths{} }
|
func (m *RootPaths) Reset() { *m = RootPaths{} }
|
||||||
func (*RootPaths) ProtoMessage() {}
|
func (*RootPaths) ProtoMessage() {}
|
||||||
func (*RootPaths) Descriptor() ([]byte, []int) {
|
func (*RootPaths) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{32}
|
return fileDescriptor_a8431b6e0aeeb761, []int{33}
|
||||||
}
|
}
|
||||||
func (m *RootPaths) XXX_Unmarshal(b []byte) error {
|
func (m *RootPaths) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo
|
|||||||
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
|
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
|
||||||
func (*ServerAddressByClientCIDR) ProtoMessage() {}
|
func (*ServerAddressByClientCIDR) ProtoMessage() {}
|
||||||
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
|
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{33}
|
return fileDescriptor_a8431b6e0aeeb761, []int{34}
|
||||||
}
|
}
|
||||||
func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
|
func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
|
|||||||
func (m *Status) Reset() { *m = Status{} }
|
func (m *Status) Reset() { *m = Status{} }
|
||||||
func (*Status) ProtoMessage() {}
|
func (*Status) ProtoMessage() {}
|
||||||
func (*Status) Descriptor() ([]byte, []int) {
|
func (*Status) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{34}
|
return fileDescriptor_a8431b6e0aeeb761, []int{35}
|
||||||
}
|
}
|
||||||
func (m *Status) XXX_Unmarshal(b []byte) error {
|
func (m *Status) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo
|
|||||||
func (m *StatusCause) Reset() { *m = StatusCause{} }
|
func (m *StatusCause) Reset() { *m = StatusCause{} }
|
||||||
func (*StatusCause) ProtoMessage() {}
|
func (*StatusCause) ProtoMessage() {}
|
||||||
func (*StatusCause) Descriptor() ([]byte, []int) {
|
func (*StatusCause) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{35}
|
return fileDescriptor_a8431b6e0aeeb761, []int{36}
|
||||||
}
|
}
|
||||||
func (m *StatusCause) XXX_Unmarshal(b []byte) error {
|
func (m *StatusCause) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo
|
|||||||
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
|
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
|
||||||
func (*StatusDetails) ProtoMessage() {}
|
func (*StatusDetails) ProtoMessage() {}
|
||||||
func (*StatusDetails) Descriptor() ([]byte, []int) {
|
func (*StatusDetails) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{36}
|
return fileDescriptor_a8431b6e0aeeb761, []int{37}
|
||||||
}
|
}
|
||||||
func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
|
func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
|
|||||||
func (m *TableOptions) Reset() { *m = TableOptions{} }
|
func (m *TableOptions) Reset() { *m = TableOptions{} }
|
||||||
func (*TableOptions) ProtoMessage() {}
|
func (*TableOptions) ProtoMessage() {}
|
||||||
func (*TableOptions) Descriptor() ([]byte, []int) {
|
func (*TableOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{37}
|
return fileDescriptor_a8431b6e0aeeb761, []int{38}
|
||||||
}
|
}
|
||||||
func (m *TableOptions) XXX_Unmarshal(b []byte) error {
|
func (m *TableOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo
|
|||||||
func (m *Time) Reset() { *m = Time{} }
|
func (m *Time) Reset() { *m = Time{} }
|
||||||
func (*Time) ProtoMessage() {}
|
func (*Time) ProtoMessage() {}
|
||||||
func (*Time) Descriptor() ([]byte, []int) {
|
func (*Time) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{38}
|
return fileDescriptor_a8431b6e0aeeb761, []int{39}
|
||||||
}
|
}
|
||||||
func (m *Time) XXX_Unmarshal(b []byte) error {
|
func (m *Time) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Time.Unmarshal(m, b)
|
return xxx_messageInfo_Time.Unmarshal(m, b)
|
||||||
@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo
|
|||||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||||
func (*Timestamp) ProtoMessage() {}
|
func (*Timestamp) ProtoMessage() {}
|
||||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{39}
|
return fileDescriptor_a8431b6e0aeeb761, []int{40}
|
||||||
}
|
}
|
||||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
|||||||
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
|
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
|
||||||
func (*TypeMeta) ProtoMessage() {}
|
func (*TypeMeta) ProtoMessage() {}
|
||||||
func (*TypeMeta) Descriptor() ([]byte, []int) {
|
func (*TypeMeta) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{40}
|
return fileDescriptor_a8431b6e0aeeb761, []int{41}
|
||||||
}
|
}
|
||||||
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
|
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
|
|||||||
func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
|
func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
|
||||||
func (*UpdateOptions) ProtoMessage() {}
|
func (*UpdateOptions) ProtoMessage() {}
|
||||||
func (*UpdateOptions) Descriptor() ([]byte, []int) {
|
func (*UpdateOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{41}
|
return fileDescriptor_a8431b6e0aeeb761, []int{42}
|
||||||
}
|
}
|
||||||
func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
|
func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
|
|||||||
func (m *Verbs) Reset() { *m = Verbs{} }
|
func (m *Verbs) Reset() { *m = Verbs{} }
|
||||||
func (*Verbs) ProtoMessage() {}
|
func (*Verbs) ProtoMessage() {}
|
||||||
func (*Verbs) Descriptor() ([]byte, []int) {
|
func (*Verbs) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{42}
|
return fileDescriptor_a8431b6e0aeeb761, []int{43}
|
||||||
}
|
}
|
||||||
func (m *Verbs) XXX_Unmarshal(b []byte) error {
|
func (m *Verbs) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo
|
|||||||
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
|
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
|
||||||
func (*WatchEvent) ProtoMessage() {}
|
func (*WatchEvent) ProtoMessage() {}
|
||||||
func (*WatchEvent) Descriptor() ([]byte, []int) {
|
func (*WatchEvent) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{43}
|
return fileDescriptor_a8431b6e0aeeb761, []int{44}
|
||||||
}
|
}
|
||||||
func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
|
func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1282,6 +1310,7 @@ func init() {
|
|||||||
proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
|
proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
|
||||||
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
|
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
|
||||||
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
|
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
|
||||||
|
proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement")
|
||||||
proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
|
proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
|
||||||
proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
|
proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
|
||||||
proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
|
proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
|
||||||
@ -1326,186 +1355,187 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptor_a8431b6e0aeeb761 = []byte{
|
var fileDescriptor_a8431b6e0aeeb761 = []byte{
|
||||||
// 2853 bytes of a gzipped FileDescriptorProto
|
// 2873 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57,
|
||||||
0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44,
|
0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa,
|
||||||
0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d,
|
0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44,
|
||||||
0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a,
|
0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49,
|
||||||
0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88,
|
0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f,
|
||||||
0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85,
|
0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84,
|
||||||
0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa,
|
0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7,
|
||||||
0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d,
|
0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e,
|
||||||
0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58,
|
0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26,
|
||||||
0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1,
|
0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03,
|
||||||
0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9,
|
0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5,
|
||||||
0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f,
|
0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf,
|
||||||
0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6,
|
0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b,
|
||||||
0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9,
|
0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e,
|
||||||
0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad,
|
0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76,
|
||||||
0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc,
|
0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f,
|
||||||
0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1,
|
0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28,
|
||||||
0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70,
|
0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26,
|
||||||
0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14,
|
0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28,
|
||||||
0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56,
|
0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29,
|
||||||
0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2,
|
0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f,
|
||||||
0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe,
|
0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58,
|
||||||
0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83,
|
0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d,
|
||||||
0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10,
|
0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86,
|
||||||
0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c,
|
0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5,
|
||||||
0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3,
|
0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a,
|
||||||
0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d,
|
0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e,
|
||||||
0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc,
|
0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a,
|
||||||
0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59,
|
0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb,
|
||||||
0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b,
|
0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71,
|
||||||
0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7,
|
0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53,
|
||||||
0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec,
|
0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7,
|
||||||
0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09,
|
0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82,
|
||||||
0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37,
|
0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5,
|
||||||
0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a,
|
0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95,
|
||||||
0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40,
|
0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b,
|
||||||
0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40,
|
0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8,
|
||||||
0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a,
|
0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75,
|
||||||
0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef,
|
0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b,
|
||||||
0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11,
|
0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09,
|
||||||
0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20,
|
0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26,
|
||||||
0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4,
|
0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3,
|
||||||
0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2,
|
0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3,
|
||||||
0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb,
|
0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74,
|
||||||
0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf,
|
0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81,
|
||||||
0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9,
|
0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2,
|
||||||
0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55,
|
0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22,
|
||||||
0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d,
|
0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1,
|
||||||
0xdb, 0x09, 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0,
|
0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60,
|
||||||
0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05,
|
0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6,
|
||||||
0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51,
|
0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1,
|
||||||
0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b,
|
0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82,
|
||||||
0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b,
|
0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8,
|
||||||
0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3,
|
0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8,
|
||||||
0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0,
|
0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56,
|
||||||
0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2,
|
0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e,
|
||||||
0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d,
|
0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47,
|
||||||
0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17,
|
0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc,
|
||||||
0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16,
|
0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79,
|
||||||
0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc,
|
0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13,
|
||||||
0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52,
|
0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77,
|
||||||
0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1,
|
0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a,
|
||||||
0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59,
|
0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e,
|
||||||
0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec,
|
0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45,
|
||||||
0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b,
|
0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98,
|
||||||
0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83,
|
0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3,
|
||||||
0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0,
|
0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e,
|
||||||
0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5,
|
0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18,
|
||||||
0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50,
|
0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d,
|
||||||
0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f,
|
0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69,
|
||||||
0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b,
|
0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91,
|
||||||
0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0,
|
0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54,
|
||||||
0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42,
|
0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63,
|
||||||
0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 0xe3,
|
0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b,
|
||||||
0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc,
|
0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92,
|
||||||
0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29,
|
0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23,
|
||||||
0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e,
|
0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57,
|
||||||
0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85,
|
0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b,
|
||||||
0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1,
|
0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a,
|
||||||
0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48,
|
0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f,
|
||||||
0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb,
|
0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c,
|
||||||
0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04,
|
0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f,
|
||||||
0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2,
|
0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e,
|
||||||
0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f,
|
0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41,
|
||||||
0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc,
|
0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37,
|
||||||
0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2,
|
0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf,
|
||||||
0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72,
|
0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73,
|
||||||
0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b,
|
0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15,
|
||||||
0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90,
|
0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88,
|
||||||
0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21,
|
0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69,
|
||||||
0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4,
|
0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7,
|
||||||
0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46,
|
0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d,
|
||||||
0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11,
|
0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7,
|
||||||
0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b,
|
0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7,
|
||||||
0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d,
|
0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14,
|
||||||
0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d,
|
0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1,
|
||||||
0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73,
|
0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17,
|
||||||
0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99,
|
0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc,
|
||||||
0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a,
|
0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f,
|
||||||
0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07,
|
0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98,
|
||||||
0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63,
|
0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b,
|
||||||
0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7,
|
0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51,
|
||||||
0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed,
|
0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c,
|
||||||
0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e,
|
0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a,
|
||||||
0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13,
|
0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3,
|
||||||
0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde,
|
0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e,
|
||||||
0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f,
|
0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd,
|
||||||
0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1,
|
0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf,
|
||||||
0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19,
|
0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b,
|
||||||
0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50,
|
0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65,
|
||||||
0x94, 0xf1, 0x29, 0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84,
|
0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6,
|
||||||
0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c,
|
0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5,
|
||||||
0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44,
|
0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59,
|
||||||
0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69,
|
0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc,
|
||||||
0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e,
|
0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17,
|
||||||
0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e,
|
0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6,
|
||||||
0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71,
|
0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75,
|
||||||
0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae,
|
0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43,
|
||||||
0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88,
|
0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86,
|
||||||
0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef,
|
0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c,
|
||||||
0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 0xc6, 0xee, 0x54, 0xbd,
|
0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53,
|
||||||
0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87,
|
0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54,
|
||||||
0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21,
|
0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a,
|
||||||
0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77,
|
0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2,
|
||||||
0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76,
|
0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91,
|
||||||
0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64,
|
0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77,
|
||||||
0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87,
|
0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4,
|
||||||
0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30,
|
0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1,
|
||||||
0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58,
|
0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0,
|
||||||
0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd,
|
0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8,
|
||||||
0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75,
|
0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8,
|
||||||
0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47,
|
0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30,
|
||||||
0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e,
|
0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a,
|
||||||
0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55,
|
0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39,
|
||||||
0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15,
|
0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb,
|
||||||
0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75,
|
0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95,
|
||||||
0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a,
|
0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0,
|
||||||
0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c,
|
0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04,
|
||||||
0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08,
|
0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66,
|
||||||
0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf,
|
0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d,
|
||||||
0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3,
|
0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0,
|
||||||
0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15,
|
0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1,
|
||||||
0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1,
|
0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09,
|
||||||
0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98,
|
0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd,
|
||||||
0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44,
|
0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70,
|
||||||
0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63,
|
0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a,
|
||||||
0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e,
|
0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2,
|
||||||
0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d,
|
0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5,
|
||||||
0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64,
|
0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9,
|
||||||
0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4,
|
0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80,
|
||||||
0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb,
|
0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a,
|
||||||
0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89,
|
0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef,
|
||||||
0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78,
|
0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2,
|
||||||
0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a,
|
0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8,
|
||||||
0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2,
|
0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06,
|
||||||
0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4,
|
0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f,
|
||||||
0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae,
|
0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a,
|
||||||
0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87,
|
0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81,
|
||||||
0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05,
|
0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52,
|
||||||
0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3,
|
0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f,
|
||||||
0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6,
|
0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad,
|
||||||
0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce,
|
0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78,
|
||||||
0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f,
|
0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03,
|
||||||
0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1,
|
0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52,
|
||||||
0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8,
|
0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a,
|
||||||
0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37,
|
0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19,
|
||||||
0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50,
|
0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90,
|
||||||
0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c,
|
0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8,
|
||||||
0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25,
|
0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda,
|
||||||
0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d,
|
0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98,
|
||||||
0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f,
|
0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73,
|
||||||
0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8,
|
0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0,
|
||||||
0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4,
|
0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82,
|
||||||
0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca,
|
0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc,
|
||||||
0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c,
|
0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f,
|
||||||
0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a,
|
0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f,
|
||||||
0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5,
|
0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31,
|
||||||
0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff,
|
0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31,
|
||||||
0xd4, 0x07, 0x28, 0x00, 0x00,
|
0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
|
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
|
||||||
@ -2025,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||||||
return len(dAtA) - i, nil
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Values) > 0 {
|
||||||
|
for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
|
||||||
|
i -= len(m.Values[iNdEx])
|
||||||
|
copy(dAtA[i:], m.Values[iNdEx])
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i -= len(m.Operator)
|
||||||
|
copy(dAtA[i:], m.Operator)
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i -= len(m.Key)
|
||||||
|
copy(dAtA[i:], m.Key)
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
|
func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
@ -3714,6 +3786,25 @@ func (m *Duration) Size() (n int) {
|
|||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
l = len(m.Operator)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
if len(m.Values) > 0 {
|
||||||
|
for _, s := range m.Values {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
func (m *FieldsV1) Size() (n int) {
|
func (m *FieldsV1) Size() (n int) {
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return 0
|
return 0
|
||||||
@ -4429,6 +4520,18 @@ func (this *Duration) String() string {
|
|||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
func (this *FieldSelectorRequirement) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&FieldSelectorRequirement{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
|
||||||
|
`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
func (this *GetOptions) String() string {
|
func (this *GetOptions) String() string {
|
||||||
if this == nil {
|
if this == nil {
|
||||||
return "nil"
|
return "nil"
|
||||||
@ -6443,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func (m *FieldsV1) Unmarshal(dAtA []byte) error {
|
func (m *FieldsV1) Unmarshal(dAtA []byte) error {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
|
23
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
generated
vendored
23
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
generated
vendored
@ -324,6 +324,25 @@ message Duration {
|
|||||||
optional int64 duration = 1;
|
optional int64 duration = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
|
||||||
|
// relates the key and values.
|
||||||
|
message FieldSelectorRequirement {
|
||||||
|
// key is the field selector key that the requirement applies to.
|
||||||
|
optional string key = 1;
|
||||||
|
|
||||||
|
// operator represents a key's relationship to a set of values.
|
||||||
|
// Valid operators are In, NotIn, Exists, DoesNotExist.
|
||||||
|
// The list of operators may grow in the future.
|
||||||
|
optional string operator = 2;
|
||||||
|
|
||||||
|
// values is an array of string values.
|
||||||
|
// If the operator is In or NotIn, the values array must be non-empty.
|
||||||
|
// If the operator is Exists or DoesNotExist, the values array must be empty.
|
||||||
|
// +optional
|
||||||
|
// +listType=atomic
|
||||||
|
repeated string values = 3;
|
||||||
|
}
|
||||||
|
|
||||||
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
|
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
|
||||||
//
|
//
|
||||||
// Each key is either a '.' representing the field itself, and will always map to an empty set,
|
// Each key is either a '.' representing the field itself, and will always map to an empty set,
|
||||||
@ -460,7 +479,7 @@ message List {
|
|||||||
optional ListMeta metadata = 1;
|
optional ListMeta metadata = 1;
|
||||||
|
|
||||||
// List of objects
|
// List of objects
|
||||||
repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
|
repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListMeta describes metadata that synthetic resources must have, including lists and
|
// ListMeta describes metadata that synthetic resources must have, including lists and
|
||||||
@ -1209,6 +1228,6 @@ message WatchEvent {
|
|||||||
// * If Type is Deleted: the state of the object immediately before deletion.
|
// * If Type is Deleted: the state of the object immediately before deletion.
|
||||||
// * If Type is Error: *Status is recommended; other types may make sense
|
// * If Type is Error: *Status is recommended; other types may make sense
|
||||||
// depending on context.
|
// depending on context.
|
||||||
optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
|
optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
83
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
generated
vendored
83
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
generated
vendored
@ -24,8 +24,10 @@ import (
|
|||||||
|
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
"k8s.io/apimachinery/pkg/selection"
|
"k8s.io/apimachinery/pkg/selection"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
utiljson "k8s.io/apimachinery/pkg/util/json"
|
||||||
)
|
)
|
||||||
|
|
||||||
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
|
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
|
||||||
@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) {
|
|||||||
if f.Raw == nil {
|
if f.Raw == nil {
|
||||||
return []byte("null"), nil
|
return []byte("null"), nil
|
||||||
}
|
}
|
||||||
|
if f.getContentType() == fieldsV1InvalidOrValidCBORObject {
|
||||||
|
var u map[string]interface{}
|
||||||
|
if err := cbor.Unmarshal(f.Raw, &u); err != nil {
|
||||||
|
return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err)
|
||||||
|
}
|
||||||
|
return utiljson.Marshal(u)
|
||||||
|
}
|
||||||
return f.Raw, nil
|
return f.Raw, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalJSON implements json.Unmarshaler
|
// UnmarshalJSON implements json.Unmarshaler
|
||||||
func (f *FieldsV1) UnmarshalJSON(b []byte) error {
|
func (f *FieldsV1) UnmarshalJSON(b []byte) error {
|
||||||
if f == nil {
|
if f == nil {
|
||||||
return errors.New("metav1.Fields: UnmarshalJSON on nil pointer")
|
return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer")
|
||||||
}
|
}
|
||||||
if !bytes.Equal(b, []byte("null")) {
|
if !bytes.Equal(b, []byte("null")) {
|
||||||
f.Raw = append(f.Raw[0:0], b...)
|
f.Raw = append(f.Raw[0:0], b...)
|
||||||
@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error {
|
|||||||
|
|
||||||
var _ json.Marshaler = FieldsV1{}
|
var _ json.Marshaler = FieldsV1{}
|
||||||
var _ json.Unmarshaler = &FieldsV1{}
|
var _ json.Unmarshaler = &FieldsV1{}
|
||||||
|
|
||||||
|
func (f FieldsV1) MarshalCBOR() ([]byte, error) {
|
||||||
|
if f.Raw == nil {
|
||||||
|
return cbor.Marshal(nil)
|
||||||
|
}
|
||||||
|
if f.getContentType() == fieldsV1InvalidOrValidJSONObject {
|
||||||
|
var u map[string]interface{}
|
||||||
|
if err := utiljson.Unmarshal(f.Raw, &u); err != nil {
|
||||||
|
return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err)
|
||||||
|
}
|
||||||
|
return cbor.Marshal(u)
|
||||||
|
}
|
||||||
|
return f.Raw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cborNull = []byte{0xf6}
|
||||||
|
|
||||||
|
func (f *FieldsV1) UnmarshalCBOR(b []byte) error {
|
||||||
|
if f == nil {
|
||||||
|
return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
if !bytes.Equal(b, cborNull) {
|
||||||
|
f.Raw = append(f.Raw[0:0], b...)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw
|
||||||
|
// bytes don't represent an allowable value in any supported encoding.
|
||||||
|
fieldsV1InvalidOrEmpty = iota
|
||||||
|
|
||||||
|
// fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that
|
||||||
|
// are a valid JSON encoding of an allowable value or don't represent an allowable value in
|
||||||
|
// any supported encoding.
|
||||||
|
fieldsV1InvalidOrValidJSONObject
|
||||||
|
|
||||||
|
// fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that
|
||||||
|
// are a valid CBOR encoding of an allowable value or don't represent an allowable value in
|
||||||
|
// any supported encoding.
|
||||||
|
fieldsV1InvalidOrValidCBORObject
|
||||||
|
)
|
||||||
|
|
||||||
|
// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject,
|
||||||
|
// fieldsV1InvalidOrValidCBORObject based on the value of Raw.
|
||||||
|
//
|
||||||
|
// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map)
|
||||||
|
// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty
|
||||||
|
// and represents an allowable value, then the initial byte unambiguously distinguishes a
|
||||||
|
// JSON-encoded value from a CBOR-encoded value.
|
||||||
|
//
|
||||||
|
// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first
|
||||||
|
// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid
|
||||||
|
// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map",
|
||||||
|
// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or
|
||||||
|
// 0xc6...0xdb). The two sets of valid initial bytes don't intersect.
|
||||||
|
func (f FieldsV1) getContentType() int {
|
||||||
|
if len(f.Raw) > 0 {
|
||||||
|
p := f.Raw[0]
|
||||||
|
switch p {
|
||||||
|
case 'n', '{', '\t', '\r', '\n', ' ':
|
||||||
|
return fieldsV1InvalidOrValidJSONObject
|
||||||
|
case 0xf6: // null
|
||||||
|
return fieldsV1InvalidOrValidCBORObject
|
||||||
|
default:
|
||||||
|
if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ {
|
||||||
|
return fieldsV1InvalidOrValidCBORObject
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fieldsV1InvalidOrEmpty
|
||||||
|
}
|
||||||
|
28
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
generated
vendored
28
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
generated
vendored
@ -19,6 +19,8 @@ package v1
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
)
|
)
|
||||||
|
|
||||||
const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
|
const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
|
||||||
@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *MicroTime) UnmarshalCBOR(b []byte) error {
|
||||||
|
var s *string
|
||||||
|
if err := cbor.Unmarshal(b, &s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if s == nil {
|
||||||
|
t.Time = time.Time{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed, err := time.Parse(RFC3339Micro, *s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Time = parsed.Local()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// UnmarshalQueryParameter converts from a URL query parameter value to an object
|
// UnmarshalQueryParameter converts from a URL query parameter value to an object
|
||||||
func (t *MicroTime) UnmarshalQueryParameter(str string) error {
|
func (t *MicroTime) UnmarshalQueryParameter(str string) error {
|
||||||
if len(str) == 0 {
|
if len(str) == 0 {
|
||||||
@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) {
|
|||||||
return json.Marshal(t.UTC().Format(RFC3339Micro))
|
return json.Marshal(t.UTC().Format(RFC3339Micro))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t MicroTime) MarshalCBOR() ([]byte, error) {
|
||||||
|
if t.IsZero() {
|
||||||
|
return cbor.Marshal(nil)
|
||||||
|
}
|
||||||
|
return cbor.Marshal(t.UTC().Format(RFC3339Micro))
|
||||||
|
}
|
||||||
|
|
||||||
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
||||||
// the OpenAPI spec of this type.
|
// the OpenAPI spec of this type.
|
||||||
//
|
//
|
||||||
|
29
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
generated
vendored
29
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
generated
vendored
@ -19,6 +19,8 @@ package v1
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Time is a wrapper around time.Time which supports correct
|
// Time is a wrapper around time.Time which supports correct
|
||||||
@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *Time) UnmarshalCBOR(b []byte) error {
|
||||||
|
var s *string
|
||||||
|
if err := cbor.Unmarshal(b, &s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if s == nil {
|
||||||
|
t.Time = time.Time{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed, err := time.Parse(time.RFC3339, *s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Time = parsed.Local()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// UnmarshalQueryParameter converts from a URL query parameter value to an object
|
// UnmarshalQueryParameter converts from a URL query parameter value to an object
|
||||||
func (t *Time) UnmarshalQueryParameter(str string) error {
|
func (t *Time) UnmarshalQueryParameter(str string) error {
|
||||||
if len(str) == 0 {
|
if len(str) == 0 {
|
||||||
@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) {
|
|||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t Time) MarshalCBOR() ([]byte, error) {
|
||||||
|
if t.IsZero() {
|
||||||
|
return cbor.Marshal(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cbor.Marshal(t.UTC().Format(time.RFC3339))
|
||||||
|
}
|
||||||
|
|
||||||
// ToUnstructured implements the value.UnstructuredConverter interface.
|
// ToUnstructured implements the value.UnstructuredConverter interface.
|
||||||
func (t Time) ToUnstructured() interface{} {
|
func (t Time) ToUnstructured() interface{} {
|
||||||
if t.IsZero() {
|
if t.IsZero() {
|
||||||
|
27
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
generated
vendored
27
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
generated
vendored
@ -1278,6 +1278,33 @@ const (
|
|||||||
LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
|
LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
|
||||||
|
// relates the key and values.
|
||||||
|
type FieldSelectorRequirement struct {
|
||||||
|
// key is the field selector key that the requirement applies to.
|
||||||
|
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
|
||||||
|
// operator represents a key's relationship to a set of values.
|
||||||
|
// Valid operators are In, NotIn, Exists, DoesNotExist.
|
||||||
|
// The list of operators may grow in the future.
|
||||||
|
Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"`
|
||||||
|
// values is an array of string values.
|
||||||
|
// If the operator is In or NotIn, the values array must be non-empty.
|
||||||
|
// If the operator is Exists or DoesNotExist, the values array must be empty.
|
||||||
|
// +optional
|
||||||
|
// +listType=atomic
|
||||||
|
Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// A field selector operator is the set of operators that can be used in a selector requirement.
|
||||||
|
type FieldSelectorOperator string
|
||||||
|
|
||||||
|
const (
|
||||||
|
FieldSelectorOpIn FieldSelectorOperator = "In"
|
||||||
|
FieldSelectorOpNotIn FieldSelectorOperator = "NotIn"
|
||||||
|
FieldSelectorOpExists FieldSelectorOperator = "Exists"
|
||||||
|
FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist"
|
||||||
|
)
|
||||||
|
|
||||||
// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
|
// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
|
||||||
// that the fieldset applies to.
|
// that the fieldset applies to.
|
||||||
type ManagedFieldsEntry struct {
|
type ManagedFieldsEntry struct {
|
||||||
|
11
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
generated
vendored
11
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
generated
vendored
@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string {
|
|||||||
return map_DeleteOptions
|
return map_DeleteOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_FieldSelectorRequirement = map[string]string{
|
||||||
|
"": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.",
|
||||||
|
"key": "key is the field selector key that the requirement applies to.",
|
||||||
|
"operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.",
|
||||||
|
"values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (FieldSelectorRequirement) SwaggerDoc() map[string]string {
|
||||||
|
return map_FieldSelectorRequirement
|
||||||
|
}
|
||||||
|
|
||||||
var map_FieldsV1 = map[string]string{
|
var map_FieldsV1 = map[string]string{
|
||||||
"": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
|
"": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
|
||||||
}
|
}
|
||||||
|
21
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
generated
vendored
21
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
generated
vendored
@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) {
|
||||||
|
*out = *in
|
||||||
|
if in.Values != nil {
|
||||||
|
in, out := &in.Values, &out.Values
|
||||||
|
*out = make([]string, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement.
|
||||||
|
func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(FieldSelectorRequirement)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *FieldsV1) DeepCopyInto(out *FieldsV1) {
|
func (in *FieldsV1) DeepCopyInto(out *FieldsV1) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
20
api/vendor/k8s.io/apimachinery/pkg/labels/selector.go
generated
vendored
20
api/vendor/k8s.io/apimachinery/pkg/labels/selector.go
generated
vendored
@ -45,6 +45,19 @@ var (
|
|||||||
// Requirements is AND of all requirements.
|
// Requirements is AND of all requirements.
|
||||||
type Requirements []Requirement
|
type Requirements []Requirement
|
||||||
|
|
||||||
|
func (r Requirements) String() string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
for i, requirement := range r {
|
||||||
|
if i > 0 {
|
||||||
|
sb.WriteString(", ")
|
||||||
|
}
|
||||||
|
sb.WriteString(requirement.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
// Selector represents a label selector.
|
// Selector represents a label selector.
|
||||||
type Selector interface {
|
type Selector interface {
|
||||||
// Matches returns true if this selector matches the given set of labels.
|
// Matches returns true if this selector matches the given set of labels.
|
||||||
@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String {
|
|||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting.
|
||||||
|
func (r *Requirement) ValuesUnsorted() []string {
|
||||||
|
ret := make([]string, 0, len(r.strValues))
|
||||||
|
ret = append(ret, r.strValues...)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
// Equal checks the equality of requirement.
|
// Equal checks the equality of requirement.
|
||||||
func (r Requirement) Equal(x Requirement) bool {
|
func (r Requirement) Equal(x Requirement) bool {
|
||||||
if r.key != x.key {
|
if r.key != x.key {
|
||||||
|
96
api/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
generated
vendored
96
api/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
generated
vendored
@ -18,16 +18,77 @@ package runtime
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
|
"k8s.io/apimachinery/pkg/util/json"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the
|
||||||
|
// signature of ToUnstructured does not allow returning an error value in cases where the conversion
|
||||||
|
// is not possible (content type is unrecognized or bytes don't match content type).
|
||||||
|
func rawToUnstructured(raw []byte, contentType string) (interface{}, error) {
|
||||||
|
switch contentType {
|
||||||
|
case ContentTypeJSON:
|
||||||
|
var u interface{}
|
||||||
|
if err := json.Unmarshal(raw, &u); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err)
|
||||||
|
}
|
||||||
|
return u, nil
|
||||||
|
case ContentTypeCBOR:
|
||||||
|
var u interface{}
|
||||||
|
if err := cbor.Unmarshal(raw, &u); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err)
|
||||||
|
}
|
||||||
|
return u, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (re RawExtension) guessContentType() string {
|
||||||
|
switch {
|
||||||
|
case bytes.HasPrefix(re.Raw, cborSelfDescribed):
|
||||||
|
return ContentTypeCBOR
|
||||||
|
case len(re.Raw) > 0:
|
||||||
|
switch re.Raw[0] {
|
||||||
|
case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||||
|
// Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null.
|
||||||
|
return ContentTypeJSON
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
func (re *RawExtension) UnmarshalJSON(in []byte) error {
|
func (re *RawExtension) UnmarshalJSON(in []byte) error {
|
||||||
if re == nil {
|
if re == nil {
|
||||||
return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
|
return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
|
||||||
}
|
}
|
||||||
if !bytes.Equal(in, []byte("null")) {
|
if bytes.Equal(in, []byte("null")) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
re.Raw = append(re.Raw[0:0], in...)
|
re.Raw = append(re.Raw[0:0], in...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
cborNull = []byte{0xf6}
|
||||||
|
cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (re *RawExtension) UnmarshalCBOR(in []byte) error {
|
||||||
|
if re == nil {
|
||||||
|
return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
if !bytes.Equal(in, cborNull) {
|
||||||
|
if !bytes.HasPrefix(in, cborSelfDescribed) {
|
||||||
|
// The self-described CBOR tag doesn't change the interpretation of the data
|
||||||
|
// item it encloses, but it is useful as a magic number. Its encoding is
|
||||||
|
// also what is used to implement the CBOR RecognizingDecoder.
|
||||||
|
re.Raw = append(re.Raw[:0], cborSelfDescribed...)
|
||||||
|
}
|
||||||
|
re.Raw = append(re.Raw, in...)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return []byte("null"), nil
|
return []byte("null"), nil
|
||||||
}
|
}
|
||||||
// TODO: Check whether ContentType is actually JSON before returning it.
|
|
||||||
|
contentType := re.guessContentType()
|
||||||
|
if contentType == ContentTypeJSON {
|
||||||
return re.Raw, nil
|
return re.Raw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := rawToUnstructured(re.Raw, contentType)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return json.Marshal(u)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (re RawExtension) MarshalCBOR() ([]byte, error) {
|
||||||
|
if re.Raw == nil {
|
||||||
|
if re.Object != nil {
|
||||||
|
return cbor.Marshal(re.Object)
|
||||||
|
}
|
||||||
|
return cbor.Marshal(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
contentType := re.guessContentType()
|
||||||
|
if contentType == ContentTypeCBOR {
|
||||||
|
return re.Raw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := rawToUnstructured(re.Raw, contentType)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cbor.Marshal(u)
|
||||||
}
|
}
|
||||||
|
36
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
generated
vendored
Normal file
36
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and
|
||||||
|
// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular,
|
||||||
|
// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions.
|
||||||
|
package direct
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Marshal(src interface{}) ([]byte, error) {
|
||||||
|
return modes.Encode.Marshal(src)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Unmarshal(src []byte, dst interface{}) error {
|
||||||
|
return modes.Decode.Unmarshal(src, dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Diagnose(src []byte) (string, error) {
|
||||||
|
return modes.Diagnostic.Diagnose(src)
|
||||||
|
}
|
65
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
generated
vendored
Normal file
65
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package modes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var buffers = BufferProvider{p: new(sync.Pool)}
|
||||||
|
|
||||||
|
type buffer struct {
|
||||||
|
bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
type pool interface {
|
||||||
|
Get() interface{}
|
||||||
|
Put(interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
type BufferProvider struct {
|
||||||
|
p pool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BufferProvider) Get() *buffer {
|
||||||
|
if buf, ok := b.p.Get().(*buffer); ok {
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
return &buffer{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BufferProvider) Put(buf *buffer) {
|
||||||
|
if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ {
|
||||||
|
// Objects in a sync.Pool are assumed to be fungible. This is not a good assumption
|
||||||
|
// for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as
|
||||||
|
// needed to accommodate writes. In Kubernetes, apiservers tend to encode "small"
|
||||||
|
// objects very frequently and much larger objects (especially large lists) only
|
||||||
|
// occasionally. Under steady load, pooled buffers tend to be borrowed frequently
|
||||||
|
// enough to prevent them from being released. Over time, each buffer is used to
|
||||||
|
// encode a large object and its capacity increases accordingly. The result is that
|
||||||
|
// practically all buffers in the pool retain much more capacity than needed to
|
||||||
|
// encode most objects.
|
||||||
|
|
||||||
|
// As a basic mitigation for the worst case, buffers with more capacity than the
|
||||||
|
// default max request body size are never returned to the pool.
|
||||||
|
// TODO: Optimize for higher buffer utilization.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
buf.Reset()
|
||||||
|
b.p.Put(buf)
|
||||||
|
}
|
422
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
generated
vendored
Normal file
422
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
generated
vendored
Normal file
@ -0,0 +1,422 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package modes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Returns a non-nil error if and only if the argument's type (or one of its component types, for
|
||||||
|
// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing
|
||||||
|
// cbor.Marshaler and likewise for the respective Unmarshaler interfaces.
|
||||||
|
//
|
||||||
|
// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic
|
||||||
|
// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be
|
||||||
|
// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree
|
||||||
|
// types in client programs.
|
||||||
|
func RejectCustomMarshalers(v interface{}) error {
|
||||||
|
if v == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
|
||||||
|
return fmt.Errorf("unable to serialize %T: %w", v, err)
|
||||||
|
}
|
||||||
|
if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
|
||||||
|
return fmt.Errorf("unable to serialize %T: %w", v, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the
|
||||||
|
// decoder shouldn't be able to contain cycles, but practically any object can be passed to the
|
||||||
|
// encoder.
|
||||||
|
var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)")
|
||||||
|
|
||||||
|
// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing
|
||||||
|
// that it can might deeply nested acyclic objects. The limit will be removed along with the rest of
|
||||||
|
// this mechanism.
|
||||||
|
const maxDepth = 2048
|
||||||
|
|
||||||
|
var marshalerCache = checkers{
|
||||||
|
cborInterface: reflect.TypeFor[cbor.Marshaler](),
|
||||||
|
nonCBORInterfaces: []reflect.Type{
|
||||||
|
reflect.TypeFor[json.Marshaler](),
|
||||||
|
reflect.TypeFor[encoding.TextMarshaler](),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var unmarshalerCache = checkers{
|
||||||
|
cborInterface: reflect.TypeFor[cbor.Unmarshaler](),
|
||||||
|
nonCBORInterfaces: []reflect.Type{
|
||||||
|
reflect.TypeFor[json.Unmarshaler](),
|
||||||
|
reflect.TypeFor[encoding.TextUnmarshaler](),
|
||||||
|
},
|
||||||
|
assumeAddressableValues: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// checker wraps a function for dynamically checking a value of a specific type for custom JSON
|
||||||
|
// behaviors not matched by a custom CBOR behavior.
|
||||||
|
type checker struct {
|
||||||
|
// check returns a non-nil error if the given value might be marshalled to or from CBOR
|
||||||
|
// using the default behavior for its kind, but marshalled to or from JSON using custom
|
||||||
|
// behavior.
|
||||||
|
check func(rv reflect.Value, depth int) error
|
||||||
|
|
||||||
|
// safe returns true if all values of this type are safe from mismatched custom marshalers.
|
||||||
|
safe func() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: stale
|
||||||
|
// Having a single addressable checker for comparisons lets us prune and collapse parts of the
|
||||||
|
// object traversal that are statically known to be safe. Depending on the type, it may be
|
||||||
|
// unnecessary to inspect each value of that type. For example, no value of the built-in type bool
|
||||||
|
// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a
|
||||||
|
// distinct type from bool).
|
||||||
|
var noop = checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type checkers struct {
|
||||||
|
m sync.Map // reflect.Type => *checker
|
||||||
|
|
||||||
|
cborInterface reflect.Type
|
||||||
|
nonCBORInterfaces []reflect.Type
|
||||||
|
|
||||||
|
assumeAddressableValues bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cache *checkers) getChecker(rt reflect.Type) checker {
|
||||||
|
if ptr, ok := cache.m.Load(rt); ok {
|
||||||
|
return *ptr.(*checker)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cache.getCheckerInternal(rt, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// linked list node representing the path from a composite type to an element type
|
||||||
|
type path struct {
|
||||||
|
Type reflect.Type
|
||||||
|
Parent *path
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p path) cyclic(rt reflect.Type) bool {
|
||||||
|
for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent {
|
||||||
|
if ancestor.Type == rt {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) {
|
||||||
|
// Store a placeholder cache entry first to handle cyclic types.
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
defer wg.Done()
|
||||||
|
c = checker{
|
||||||
|
safe: func() bool {
|
||||||
|
wg.Wait()
|
||||||
|
return c.safe()
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
wg.Wait()
|
||||||
|
return c.check(rv, depth)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded {
|
||||||
|
// Someone else stored an entry for this type, use it.
|
||||||
|
return *actual.(*checker)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take a nonreflective path for the unstructured container types. They're common and
|
||||||
|
// usually nested inside one another.
|
||||||
|
switch rt {
|
||||||
|
case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}]():
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
return checkUnstructuredValue(cache, rv.Interface(), depth)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// It's possible that one of the relevant interfaces is implemented on a type with a pointer
|
||||||
|
// receiver, but that a particular value of that type is not addressable. For example:
|
||||||
|
//
|
||||||
|
// func (Foo) MarshalText() ([]byte, error) { ... }
|
||||||
|
// func (*Foo) MarshalCBOR() ([]byte, error) { ... }
|
||||||
|
//
|
||||||
|
// Both methods are in the method set of *Foo, but the method set of Foo contains only
|
||||||
|
// MarshalText.
|
||||||
|
//
|
||||||
|
// Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text
|
||||||
|
// interface with a pointer receiver are always accessible. Only the unmarshaler check
|
||||||
|
// assumes that CBOR methods with pointer receivers are accessible.
|
||||||
|
|
||||||
|
if rt.Implements(cache.cborInterface) {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
for _, unsafe := range cache.nonCBORInterfaces {
|
||||||
|
if rt.Implements(unsafe) {
|
||||||
|
err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe)
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(reflect.Value, int) error {
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
for _, unsafe := range cache.nonCBORInterfaces {
|
||||||
|
if reflect.PointerTo(rt).Implements(unsafe) {
|
||||||
|
err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe)
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(reflect.Value, int) error {
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self := &path{Type: rt, Parent: parent}
|
||||||
|
|
||||||
|
switch rt.Kind() {
|
||||||
|
case reflect.Array:
|
||||||
|
ce := cache.getCheckerInternal(rt.Elem(), self)
|
||||||
|
rtlen := rt.Len()
|
||||||
|
if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
for i := 0; i < rtlen; i++ {
|
||||||
|
if err := ce.check(rv.Index(i), depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
// All interface values have to be checked because their dynamic type might
|
||||||
|
// implement one of the interesting interfaces or be composed of another type that
|
||||||
|
// does.
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
if rv.IsNil() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Unpacking interfaces must count against recursion depth,
|
||||||
|
// consider this cycle:
|
||||||
|
// > var i interface{}
|
||||||
|
// > var p *interface{} = &i
|
||||||
|
// > i = p
|
||||||
|
// > rv := reflect.ValueOf(i)
|
||||||
|
// > for {
|
||||||
|
// > rv = rv.Elem()
|
||||||
|
// > }
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
rv = rv.Elem()
|
||||||
|
return cache.getChecker(rv.Type()).check(rv, depth-1)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
rtk := rt.Key()
|
||||||
|
ck := cache.getCheckerInternal(rtk, self)
|
||||||
|
rte := rt.Elem()
|
||||||
|
ce := cache.getCheckerInternal(rte, self)
|
||||||
|
if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
iter := rv.MapRange()
|
||||||
|
rvk := reflect.New(rtk).Elem()
|
||||||
|
rve := reflect.New(rte).Elem()
|
||||||
|
for iter.Next() {
|
||||||
|
rvk.SetIterKey(iter)
|
||||||
|
if err := ck.check(rvk, depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rve.SetIterValue(iter)
|
||||||
|
if err := ce.check(rve, depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Pointer:
|
||||||
|
ce := cache.getCheckerInternal(rt.Elem(), self)
|
||||||
|
if !self.cyclic(rt.Elem()) && ce.safe() {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
if rv.IsNil() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
return ce.check(rv.Elem(), depth-1)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
ce := cache.getCheckerInternal(rt.Elem(), self)
|
||||||
|
if !self.cyclic(rt.Elem()) && ce.safe() {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
if err := ce.check(rv.Index(i), depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
type field struct {
|
||||||
|
Index int
|
||||||
|
Checker checker
|
||||||
|
}
|
||||||
|
var fields []field
|
||||||
|
for i := 0; i < rt.NumField(); i++ {
|
||||||
|
f := rt.Field(i)
|
||||||
|
cf := cache.getCheckerInternal(f.Type, self)
|
||||||
|
if !self.cyclic(f.Type) && cf.safe() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields = append(fields, field{Index: i, Checker: cf})
|
||||||
|
}
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return noop
|
||||||
|
}
|
||||||
|
return checker{
|
||||||
|
safe: func() bool {
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
check: func(rv reflect.Value, depth int) error {
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
for _, fi := range fields {
|
||||||
|
if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
// Not a serializable composite type (funcs and channels are composite types but are
|
||||||
|
// rejected by JSON and CBOR serialization).
|
||||||
|
return noop
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error {
|
||||||
|
switch v := v.(type) {
|
||||||
|
case nil, bool, int64, float64, string:
|
||||||
|
return nil
|
||||||
|
case []interface{}:
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
for _, element := range v {
|
||||||
|
if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
case map[string]interface{}:
|
||||||
|
if depth <= 0 {
|
||||||
|
return errMaxDepthExceeded
|
||||||
|
}
|
||||||
|
for _, element := range v {
|
||||||
|
if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
// Unmarshaling an unstructured doesn't use other dynamic types, but nothing
|
||||||
|
// prevents inserting values with arbitrary dynamic types into unstructured content,
|
||||||
|
// as long as they can be marshalled.
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
return cache.getChecker(rv.Type()).check(rv, depth)
|
||||||
|
}
|
||||||
|
}
|
158
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
generated
vendored
Normal file
158
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package modes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry {
|
||||||
|
var opts []func(*cbor.SimpleValueRegistry) error
|
||||||
|
for sv := 0; sv <= 255; sv++ {
|
||||||
|
// Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved
|
||||||
|
// and considered ill-formed by the CBOR specification. We only accept false (20),
|
||||||
|
// true (21), and null (22).
|
||||||
|
switch sv {
|
||||||
|
case 20: // false
|
||||||
|
case 21: // true
|
||||||
|
case 22: // null
|
||||||
|
case 24, 25, 26, 27, 28, 29, 30, 31: // reserved
|
||||||
|
default:
|
||||||
|
opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return simpleValues
|
||||||
|
}()
|
||||||
|
|
||||||
|
var Decode cbor.DecMode = func() cbor.DecMode {
|
||||||
|
decode, err := cbor.DecOptions{
|
||||||
|
// Maps with duplicate keys are well-formed but invalid according to the CBOR spec
|
||||||
|
// and never acceptable. Unlike the JSON serializer, inputs containing duplicate map
|
||||||
|
// keys are rejected outright and not surfaced as a strict decoding error.
|
||||||
|
DupMapKey: cbor.DupMapKeyEnforcedAPF,
|
||||||
|
|
||||||
|
// For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted
|
||||||
|
// with or without tagging. If a tag number is present, it must be valid.
|
||||||
|
TimeTag: cbor.DecTagOptional,
|
||||||
|
|
||||||
|
// Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit
|
||||||
|
// is 10000.
|
||||||
|
MaxNestedLevels: 64,
|
||||||
|
|
||||||
|
MaxArrayElements: 1024,
|
||||||
|
MaxMapPairs: 1024,
|
||||||
|
|
||||||
|
// Indefinite-length sequences aren't produced by this serializer, but other
|
||||||
|
// implementations can.
|
||||||
|
IndefLength: cbor.IndefLengthAllowed,
|
||||||
|
|
||||||
|
// Accept inputs that contain CBOR tags.
|
||||||
|
TagsMd: cbor.TagsAllowed,
|
||||||
|
|
||||||
|
// Decode type 0 (unsigned integer) as int64.
|
||||||
|
// TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64.
|
||||||
|
IntDec: cbor.IntDecConvertSignedOrFail,
|
||||||
|
|
||||||
|
// Disable producing map[cbor.ByteString]interface{}, which is not acceptable for
|
||||||
|
// decodes into interface{}.
|
||||||
|
MapKeyByteString: cbor.MapKeyByteStringForbidden,
|
||||||
|
|
||||||
|
// Error on map keys that don't map to a field in the destination struct.
|
||||||
|
ExtraReturnErrors: cbor.ExtraDecErrorUnknownField,
|
||||||
|
|
||||||
|
// Decode maps into concrete type map[string]interface{} when the destination is an
|
||||||
|
// interface{}.
|
||||||
|
DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)),
|
||||||
|
|
||||||
|
// A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but
|
||||||
|
// invalid according to the CBOR spec. Reject invalid inputs. Encoders are
|
||||||
|
// responsible for ensuring that all text strings they produce contain valid UTF-8
|
||||||
|
// sequences and may use the byte string major type to encode strings that have not
|
||||||
|
// been validated.
|
||||||
|
UTF8: cbor.UTF8RejectInvalid,
|
||||||
|
|
||||||
|
// Never make a case-insensitive match between a map key and a struct field.
|
||||||
|
FieldNameMatching: cbor.FieldNameMatchingCaseSensitive,
|
||||||
|
|
||||||
|
// Produce string concrete values when decoding a CBOR byte string into interface{}.
|
||||||
|
DefaultByteStringType: reflect.TypeOf(""),
|
||||||
|
|
||||||
|
// Allow CBOR byte strings to be decoded into string destination values. If a byte
|
||||||
|
// string is enclosed in an "expected later encoding" tag
|
||||||
|
// (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text
|
||||||
|
// encoding indicated by that tag (e.g. base64) will be applied to the contents of
|
||||||
|
// the byte string.
|
||||||
|
ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding,
|
||||||
|
|
||||||
|
// Allow CBOR byte strings to match struct fields when appearing as a map key.
|
||||||
|
FieldNameByteString: cbor.FieldNameByteStringAllowed,
|
||||||
|
|
||||||
|
// When decoding an unrecognized tag to interface{}, return the decoded tag content
|
||||||
|
// instead of the default, a cbor.Tag representing a (number, content) pair.
|
||||||
|
UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny,
|
||||||
|
|
||||||
|
// Decode time tags to interface{} as strings containing RFC 3339 timestamps.
|
||||||
|
TimeTagToAny: cbor.TimeTagToRFC3339Nano,
|
||||||
|
|
||||||
|
// For parity with JSON, strings can be decoded into time.Time if they are RFC 3339
|
||||||
|
// timestamps.
|
||||||
|
ByteStringToTime: cbor.ByteStringToTimeAllowed,
|
||||||
|
|
||||||
|
// Reject NaN and infinite floating-point values since they don't have a JSON
|
||||||
|
// representation (RFC 8259 Section 6).
|
||||||
|
NaN: cbor.NaNDecodeForbidden,
|
||||||
|
Inf: cbor.InfDecodeForbidden,
|
||||||
|
|
||||||
|
// When unmarshaling a byte string into a []byte, assume that the byte string
|
||||||
|
// contains base64-encoded bytes, unless explicitly counterindicated by an "expected
|
||||||
|
// later encoding" tag. This is consistent with the because of unmarshaling a JSON
|
||||||
|
// text into a []byte.
|
||||||
|
ByteStringExpectedFormat: cbor.ByteStringExpectedBase64,
|
||||||
|
|
||||||
|
// Reject the arbitrary-precision integer tags because they can't be faithfully
|
||||||
|
// roundtripped through the allowable Unstructured types.
|
||||||
|
BignumTag: cbor.BignumTagForbidden,
|
||||||
|
|
||||||
|
// Reject anything other than the simple values true, false, and null.
|
||||||
|
SimpleValues: simpleValues,
|
||||||
|
|
||||||
|
// Disable default recognition of types implementing encoding.BinaryUnmarshaler,
|
||||||
|
// which is not recognized for JSON decoding.
|
||||||
|
BinaryUnmarshaler: cbor.BinaryUnmarshalerNone,
|
||||||
|
}.DecMode()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return decode
|
||||||
|
}()
|
||||||
|
|
||||||
|
// DecodeLax is derived from Decode, but does not complain about unknown fields in the input.
|
||||||
|
var DecodeLax cbor.DecMode = func() cbor.DecMode {
|
||||||
|
opts := Decode.DecOptions()
|
||||||
|
opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit
|
||||||
|
dm, err := opts.DecMode()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return dm
|
||||||
|
}()
|
36
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
generated
vendored
Normal file
36
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package modes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var Diagnostic cbor.DiagMode = func() cbor.DiagMode {
|
||||||
|
opts := Decode.DecOptions()
|
||||||
|
diagnostic, err := cbor.DiagOptions{
|
||||||
|
ByteStringText: true,
|
||||||
|
|
||||||
|
MaxNestedLevels: opts.MaxNestedLevels,
|
||||||
|
MaxArrayElements: opts.MaxArrayElements,
|
||||||
|
MaxMapPairs: opts.MaxMapPairs,
|
||||||
|
}.DiagMode()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return diagnostic
|
||||||
|
}()
|
155
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
generated
vendored
Normal file
155
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package modes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var Encode = EncMode{
|
||||||
|
delegate: func() cbor.UserBufferEncMode {
|
||||||
|
encode, err := cbor.EncOptions{
|
||||||
|
// Map keys need to be sorted to have deterministic output, and this is the order
|
||||||
|
// defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements".
|
||||||
|
Sort: cbor.SortBytewiseLexical,
|
||||||
|
|
||||||
|
// CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store
|
||||||
|
// floats in the smallest width that preserves value so that equivalent float32 and
|
||||||
|
// float64 values encode to identical bytes, as they do in a JSON
|
||||||
|
// encoding. Satisfies one of the "Core Deterministic Encoding Requirements".
|
||||||
|
ShortestFloat: cbor.ShortestFloat16,
|
||||||
|
|
||||||
|
// Error on attempt to encode NaN and infinite values. This is what the JSON
|
||||||
|
// serializer does.
|
||||||
|
NaNConvert: cbor.NaNConvertReject,
|
||||||
|
InfConvert: cbor.InfConvertReject,
|
||||||
|
|
||||||
|
// Error on attempt to encode math/big.Int values, which can't be faithfully
|
||||||
|
// roundtripped through Unstructured in general (the dynamic numeric types allowed
|
||||||
|
// in Unstructured are limited to float64 and int64).
|
||||||
|
BigIntConvert: cbor.BigIntConvertReject,
|
||||||
|
|
||||||
|
// MarshalJSON for time.Time writes RFC3339 with nanos.
|
||||||
|
Time: cbor.TimeRFC3339Nano,
|
||||||
|
|
||||||
|
// The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by
|
||||||
|
// the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no
|
||||||
|
// reliable way of knowing that a particular string originated from serializing a
|
||||||
|
// time.Time), so producing tag 0 has little use.
|
||||||
|
TimeTag: cbor.EncTagNone,
|
||||||
|
|
||||||
|
// Indefinite-length items have multiple encodings and aren't being used anyway, so
|
||||||
|
// disable to avoid an opportunity for nondeterminism.
|
||||||
|
IndefLength: cbor.IndefLengthForbidden,
|
||||||
|
|
||||||
|
// Preserve distinction between nil and empty for slices and maps.
|
||||||
|
NilContainers: cbor.NilContainerAsNull,
|
||||||
|
|
||||||
|
// OK to produce tags.
|
||||||
|
TagsMd: cbor.TagsAllowed,
|
||||||
|
|
||||||
|
// Use the same definition of "empty" as encoding/json.
|
||||||
|
OmitEmpty: cbor.OmitEmptyGoValue,
|
||||||
|
|
||||||
|
// The CBOR types text string and byte string are structurally equivalent, with the
|
||||||
|
// semantic difference that a text string whose content is an invalid UTF-8 sequence
|
||||||
|
// is itself invalid. We reject all invalid text strings at decode time and do not
|
||||||
|
// validate or sanitize all Go strings at encode time. Encoding Go strings to the
|
||||||
|
// byte string type is comparable to the existing Protobuf behavior and cheaply
|
||||||
|
// ensures that the output is valid CBOR.
|
||||||
|
String: cbor.StringToByteString,
|
||||||
|
|
||||||
|
// Encode struct field names to the byte string type rather than the text string
|
||||||
|
// type.
|
||||||
|
FieldName: cbor.FieldNameToByteString,
|
||||||
|
|
||||||
|
// Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte
|
||||||
|
// strings.
|
||||||
|
ByteArray: cbor.ByteArrayToArray,
|
||||||
|
|
||||||
|
// Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64
|
||||||
|
// encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to
|
||||||
|
// interoperate with the existing JSON behavior. This indicates to the decoder that,
|
||||||
|
// when decoding into a string (or unstructured), the resulting value should be the
|
||||||
|
// base64 encoding of the original bytes. No base64 encoding or decoding needs to be
|
||||||
|
// performed for []byte-to-CBOR-to-[]byte roundtrips.
|
||||||
|
ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64,
|
||||||
|
|
||||||
|
// Disable default recognition of types implementing encoding.BinaryMarshaler, which
|
||||||
|
// is not recognized for JSON encoding.
|
||||||
|
BinaryMarshaler: cbor.BinaryMarshalerNone,
|
||||||
|
}.UserBufferEncMode()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return encode
|
||||||
|
}(),
|
||||||
|
}
|
||||||
|
|
||||||
|
var EncodeNondeterministic = EncMode{
|
||||||
|
delegate: func() cbor.UserBufferEncMode {
|
||||||
|
opts := Encode.options()
|
||||||
|
opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0.
|
||||||
|
em, err := opts.UserBufferEncMode()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return em
|
||||||
|
}(),
|
||||||
|
}
|
||||||
|
|
||||||
|
type EncMode struct {
|
||||||
|
delegate cbor.UserBufferEncMode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (em EncMode) options() cbor.EncOptions {
|
||||||
|
return em.delegate.EncOptions()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (em EncMode) MarshalTo(v interface{}, w io.Writer) error {
|
||||||
|
if buf, ok := w.(*buffer); ok {
|
||||||
|
return em.delegate.MarshalToBuffer(v, &buf.Buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := buffers.Get()
|
||||||
|
defer buffers.Put(buf)
|
||||||
|
if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(w, buf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (em EncMode) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
buf := buffers.Get()
|
||||||
|
defer buffers.Put(buf)
|
||||||
|
|
||||||
|
if err := em.MarshalTo(v, &buf.Buffer); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
clone := make([]byte, buf.Len())
|
||||||
|
copy(clone, buf.Bytes())
|
||||||
|
|
||||||
|
return clone, nil
|
||||||
|
}
|
1
api/vendor/k8s.io/apimachinery/pkg/runtime/types.go
generated
vendored
1
api/vendor/k8s.io/apimachinery/pkg/runtime/types.go
generated
vendored
@ -46,6 +46,7 @@ const (
|
|||||||
ContentTypeJSON string = "application/json"
|
ContentTypeJSON string = "application/json"
|
||||||
ContentTypeYAML string = "application/yaml"
|
ContentTypeYAML string = "application/yaml"
|
||||||
ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
|
ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
|
||||||
|
ContentTypeCBOR string = "application/cbor"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RawExtension is used to hold extensions in external versions.
|
// RawExtension is used to hold extensions in external versions.
|
||||||
|
26
api/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
generated
vendored
26
api/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
|
|||||||
return json.Unmarshal(value, &intstr.IntVal)
|
return json.Unmarshal(value, &intstr.IntVal)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (intstr *IntOrString) UnmarshalCBOR(value []byte) error {
|
||||||
|
if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil {
|
||||||
|
intstr.Type = String
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
intstr.Type = Int
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// String returns the string value, or the Itoa of the int value.
|
// String returns the string value, or the Itoa of the int value.
|
||||||
func (intstr *IntOrString) String() string {
|
func (intstr *IntOrString) String() string {
|
||||||
if intstr == nil {
|
if intstr == nil {
|
||||||
@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (intstr IntOrString) MarshalCBOR() ([]byte, error) {
|
||||||
|
switch intstr.Type {
|
||||||
|
case Int:
|
||||||
|
return cbor.Marshal(intstr.IntVal)
|
||||||
|
case String:
|
||||||
|
return cbor.Marshal(intstr.StrVal)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("impossible IntOrString.Type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
||||||
// the OpenAPI spec of this type.
|
// the OpenAPI spec of this type.
|
||||||
//
|
//
|
||||||
|
111
api/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
generated
vendored
111
api/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
|||||||
package runtime
|
package runtime
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"runtime"
|
"runtime"
|
||||||
@ -35,7 +36,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// PanicHandlers is a list of functions which will be invoked when a panic happens.
|
// PanicHandlers is a list of functions which will be invoked when a panic happens.
|
||||||
var PanicHandlers = []func(interface{}){logPanic}
|
var PanicHandlers = []func(context.Context, interface{}){logPanic}
|
||||||
|
|
||||||
// HandleCrash simply catches a crash and logs an error. Meant to be called via
|
// HandleCrash simply catches a crash and logs an error. Meant to be called via
|
||||||
// defer. Additional context-specific handlers can be provided, and will be
|
// defer. Additional context-specific handlers can be provided, and will be
|
||||||
@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic}
|
|||||||
// handlers and logging the panic message.
|
// handlers and logging the panic message.
|
||||||
//
|
//
|
||||||
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
|
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
|
||||||
|
//
|
||||||
|
// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
|
||||||
func HandleCrash(additionalHandlers ...func(interface{})) {
|
func HandleCrash(additionalHandlers ...func(interface{})) {
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
|
additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
|
||||||
|
for i, handler := range additionalHandlers {
|
||||||
|
handler := handler // capture loop variable
|
||||||
|
additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) {
|
||||||
|
handler(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
handleCrash(context.Background(), r, additionalHandlersWithContext...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via
|
||||||
|
// defer. Additional context-specific handlers can be provided, and will be
|
||||||
|
// called in case of panic. HandleCrash actually crashes, after calling the
|
||||||
|
// handlers and logging the panic message.
|
||||||
|
//
|
||||||
|
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
|
||||||
|
//
|
||||||
|
// The context is used to determine how to log.
|
||||||
|
func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
handleCrash(ctx, r, additionalHandlers...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleCrash is the common implementation of HandleCrash and HandleCrash.
|
||||||
|
// Having those call a common implementation ensures that the stack depth
|
||||||
|
// is the same regardless through which path the handlers get invoked.
|
||||||
|
func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
|
||||||
for _, fn := range PanicHandlers {
|
for _, fn := range PanicHandlers {
|
||||||
fn(r)
|
fn(ctx, r)
|
||||||
}
|
}
|
||||||
for _, fn := range additionalHandlers {
|
for _, fn := range additionalHandlers {
|
||||||
fn(r)
|
fn(ctx, r)
|
||||||
}
|
}
|
||||||
if ReallyCrash {
|
if ReallyCrash {
|
||||||
// Actually proceed to panic.
|
// Actually proceed to panic.
|
||||||
panic(r)
|
panic(r)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
|
// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
|
||||||
func logPanic(r interface{}) {
|
func logPanic(ctx context.Context, r interface{}) {
|
||||||
if r == http.ErrAbortHandler {
|
if r == http.ErrAbortHandler {
|
||||||
// honor the http.ErrAbortHandler sentinel panic value:
|
// honor the http.ErrAbortHandler sentinel panic value:
|
||||||
// ErrAbortHandler is a sentinel panic value to abort a handler.
|
// ErrAbortHandler is a sentinel panic value to abort a handler.
|
||||||
@ -73,10 +105,20 @@ func logPanic(r interface{}) {
|
|||||||
const size = 64 << 10
|
const size = 64 << 10
|
||||||
stacktrace := make([]byte, size)
|
stacktrace := make([]byte, size)
|
||||||
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
|
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
|
||||||
|
|
||||||
|
// We don't really know how many call frames to skip because the Go
|
||||||
|
// panic handler is between us and the code where the panic occurred.
|
||||||
|
// If it's one function (as in Go 1.21), then skipping four levels
|
||||||
|
// gets us to the function which called the `defer HandleCrashWithontext(...)`.
|
||||||
|
logger := klog.FromContext(ctx).WithCallDepth(4)
|
||||||
|
|
||||||
|
// For backwards compatibility, conversion to string
|
||||||
|
// is handled here instead of defering to the logging
|
||||||
|
// backend.
|
||||||
if _, ok := r.(string); ok {
|
if _, ok := r.(string); ok {
|
||||||
klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
|
logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace))
|
||||||
} else {
|
} else {
|
||||||
klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
|
logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -84,35 +126,76 @@ func logPanic(r interface{}) {
|
|||||||
// error occurs.
|
// error occurs.
|
||||||
// TODO(lavalamp): for testability, this and the below HandleError function
|
// TODO(lavalamp): for testability, this and the below HandleError function
|
||||||
// should be packaged up into a testable and reusable object.
|
// should be packaged up into a testable and reusable object.
|
||||||
var ErrorHandlers = []func(error){
|
var ErrorHandlers = []ErrorHandler{
|
||||||
logError,
|
logError,
|
||||||
|
func(_ context.Context, _ error, _ string, _ ...interface{}) {
|
||||||
(&rudimentaryErrorBackoff{
|
(&rudimentaryErrorBackoff{
|
||||||
lastErrorTime: time.Now(),
|
lastErrorTime: time.Now(),
|
||||||
// 1ms was the number folks were able to stomach as a global rate limit.
|
// 1ms was the number folks were able to stomach as a global rate limit.
|
||||||
// If you need to log errors more than 1000 times a second you
|
// If you need to log errors more than 1000 times a second you
|
||||||
// should probably consider fixing your code instead. :)
|
// should probably consider fixing your code instead. :)
|
||||||
minPeriod: time.Millisecond,
|
minPeriod: time.Millisecond,
|
||||||
}).OnError,
|
}).OnError()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{})
|
||||||
|
|
||||||
// HandlerError is a method to invoke when a non-user facing piece of code cannot
|
// HandlerError is a method to invoke when a non-user facing piece of code cannot
|
||||||
// return an error and needs to indicate it has been ignored. Invoking this method
|
// return an error and needs to indicate it has been ignored. Invoking this method
|
||||||
// is preferable to logging the error - the default behavior is to log but the
|
// is preferable to logging the error - the default behavior is to log but the
|
||||||
// errors may be sent to a remote server for analysis.
|
// errors may be sent to a remote server for analysis.
|
||||||
|
//
|
||||||
|
// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
|
||||||
func HandleError(err error) {
|
func HandleError(err error) {
|
||||||
// this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
|
// this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
handleError(context.Background(), err, "Unhandled Error")
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerErrorWithContext is a method to invoke when a non-user facing piece of code cannot
|
||||||
|
// return an error and needs to indicate it has been ignored. Invoking this method
|
||||||
|
// is preferable to logging the error - the default behavior is to log but the
|
||||||
|
// errors may be sent to a remote server for analysis. The context is used to
|
||||||
|
// determine how to log the error.
|
||||||
|
//
|
||||||
|
// If contextual logging is enabled, the default log output is equivalent to
|
||||||
|
//
|
||||||
|
// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...)
|
||||||
|
//
|
||||||
|
// Without contextual logging, it is equivalent to:
|
||||||
|
//
|
||||||
|
// klog.ErrorS(err, msg, keysAndValues...)
|
||||||
|
//
|
||||||
|
// In contrast to HandleError, passing nil for the error is still going to
|
||||||
|
// trigger a log entry. Don't construct a new error or wrap an error
|
||||||
|
// with fmt.Errorf. Instead, add additional information via the mssage
|
||||||
|
// and key/value pairs.
|
||||||
|
//
|
||||||
|
// This variant should be used instead of HandleError because it supports
|
||||||
|
// structured, contextual logging.
|
||||||
|
func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
|
||||||
|
handleError(ctx, err, msg, keysAndValues...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleError is the common implementation of HandleError and HandleErrorWithContext.
|
||||||
|
// Using this common implementation ensures that the stack depth
|
||||||
|
// is the same regardless through which path the handlers get invoked.
|
||||||
|
func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
|
||||||
for _, fn := range ErrorHandlers {
|
for _, fn := range ErrorHandlers {
|
||||||
fn(err)
|
fn(ctx, err, msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// logError prints an error with the call stack of the location it was reported
|
// logError prints an error with the call stack of the location it was reported.
|
||||||
func logError(err error) {
|
// It expects to be called as <caller> -> HandleError[WithContext] -> handleError -> logError.
|
||||||
klog.ErrorDepth(2, err)
|
func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
|
||||||
|
logger := klog.FromContext(ctx).WithCallDepth(3)
|
||||||
|
logger = klog.LoggerWithName(logger, "UnhandledError")
|
||||||
|
logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs.
|
||||||
}
|
}
|
||||||
|
|
||||||
type rudimentaryErrorBackoff struct {
|
type rudimentaryErrorBackoff struct {
|
||||||
@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct {
|
|||||||
|
|
||||||
// OnError will block if it is called more often than the embedded period time.
|
// OnError will block if it is called more often than the embedded period time.
|
||||||
// This will prevent overly tight hot error loops.
|
// This will prevent overly tight hot error loops.
|
||||||
func (r *rudimentaryErrorBackoff) OnError(error) {
|
func (r *rudimentaryErrorBackoff) OnError() {
|
||||||
now := time.Now() // start the timer before acquiring the lock
|
now := time.Now() // start the timer before acquiring the lock
|
||||||
r.lastErrorTimeLock.Lock()
|
r.lastErrorTimeLock.Lock()
|
||||||
d := now.Sub(r.lastErrorTime)
|
d := now.Sub(r.lastErrorTime)
|
||||||
|
8
api/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
generated
vendored
8
api/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
generated
vendored
@ -68,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] {
|
|||||||
// Clear empties the set.
|
// Clear empties the set.
|
||||||
// It is preferable to replace the set with a newly constructed set,
|
// It is preferable to replace the set with a newly constructed set,
|
||||||
// but not all callers can do that (when there are other references to the map).
|
// but not all callers can do that (when there are other references to the map).
|
||||||
// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN
|
|
||||||
// can't be cleared because NaN can't be removed.
|
|
||||||
// For sets containing items of a type that is reflexive for ==,
|
|
||||||
// this is optimized to a single call to runtime.mapclear().
|
|
||||||
func (s Set[T]) Clear() Set[T] {
|
func (s Set[T]) Clear() Set[T] {
|
||||||
for key := range s {
|
clear(s)
|
||||||
delete(s, key)
|
|
||||||
}
|
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
|
40
api/vendor/k8s.io/apimachinery/pkg/watch/watch.go
generated
vendored
40
api/vendor/k8s.io/apimachinery/pkg/watch/watch.go
generated
vendored
@ -27,13 +27,25 @@ import (
|
|||||||
|
|
||||||
// Interface can be implemented by anything that knows how to watch and report changes.
|
// Interface can be implemented by anything that knows how to watch and report changes.
|
||||||
type Interface interface {
|
type Interface interface {
|
||||||
// Stop stops watching. Will close the channel returned by ResultChan(). Releases
|
// Stop tells the producer that the consumer is done watching, so the
|
||||||
// any resources used by the watch.
|
// producer should stop sending events and close the result channel. The
|
||||||
|
// consumer should keep watching for events until the result channel is
|
||||||
|
// closed.
|
||||||
|
//
|
||||||
|
// Because some implementations may create channels when constructed, Stop
|
||||||
|
// must always be called, even if the consumer has not yet called
|
||||||
|
// ResultChan().
|
||||||
|
//
|
||||||
|
// Only the consumer should call Stop(), not the producer. If the producer
|
||||||
|
// errors and needs to stop the watch prematurely, it should instead send
|
||||||
|
// an error event and close the result channel.
|
||||||
Stop()
|
Stop()
|
||||||
|
|
||||||
// ResultChan returns a chan which will receive all the events. If an error occurs
|
// ResultChan returns a channel which will receive events from the event
|
||||||
// or Stop() is called, the implementation will close this channel and
|
// producer. If an error occurs or Stop() is called, the producer must
|
||||||
// release any resources used by the watch.
|
// close this channel and release any resources used by the watch.
|
||||||
|
// Closing the result channel tells the consumer that no more events will be
|
||||||
|
// sent.
|
||||||
ResultChan() <-chan Event
|
ResultChan() <-chan Event
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event {
|
|||||||
func (pw *ProxyWatcher) StopChan() <-chan struct{} {
|
func (pw *ProxyWatcher) StopChan() <-chan struct{} {
|
||||||
return pw.stopCh
|
return pw.stopCh
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MockWatcher implements watch.Interface with mockable functions.
|
||||||
|
type MockWatcher struct {
|
||||||
|
StopFunc func()
|
||||||
|
ResultChanFunc func() <-chan Event
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Interface = &MockWatcher{}
|
||||||
|
|
||||||
|
// Stop calls StopFunc
|
||||||
|
func (mw MockWatcher) Stop() {
|
||||||
|
mw.StopFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResultChan calls ResultChanFunc
|
||||||
|
func (mw MockWatcher) ResultChan() <-chan Event {
|
||||||
|
return mw.ResultChanFunc()
|
||||||
|
}
|
||||||
|
76
api/vendor/k8s.io/klog/v2/klog.go
generated
vendored
76
api/vendor/k8s.io/klog/v2/klog.go
generated
vendored
@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// flushSyncWriter is the interface satisfied by logging destinations.
|
|
||||||
type flushSyncWriter interface {
|
|
||||||
Flush() error
|
|
||||||
Sync() error
|
|
||||||
io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
var logging loggingT
|
var logging loggingT
|
||||||
var commandLine flag.FlagSet
|
var commandLine flag.FlagSet
|
||||||
|
|
||||||
@ -486,7 +479,7 @@ type settings struct {
|
|||||||
// Access to all of the following fields must be protected via a mutex.
|
// Access to all of the following fields must be protected via a mutex.
|
||||||
|
|
||||||
// file holds writer for each of the log types.
|
// file holds writer for each of the log types.
|
||||||
file [severity.NumSeverity]flushSyncWriter
|
file [severity.NumSeverity]io.Writer
|
||||||
// flushInterval is the interval for periodic flushing. If zero,
|
// flushInterval is the interval for periodic flushing. If zero,
|
||||||
// the global default will be used.
|
// the global default will be used.
|
||||||
flushInterval time.Duration
|
flushInterval time.Duration
|
||||||
@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
|
|||||||
buffer.PutBuffer(b)
|
buffer.PutBuffer(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
// redirectBuffer is used to set an alternate destination for the logs
|
|
||||||
type redirectBuffer struct {
|
|
||||||
w io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rb *redirectBuffer) Sync() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rb *redirectBuffer) Flush() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
|
|
||||||
return rb.w.Write(bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetOutput sets the output destination for all severities
|
// SetOutput sets the output destination for all severities
|
||||||
func SetOutput(w io.Writer) {
|
func SetOutput(w io.Writer) {
|
||||||
logging.mu.Lock()
|
logging.mu.Lock()
|
||||||
defer logging.mu.Unlock()
|
defer logging.mu.Unlock()
|
||||||
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
|
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
|
||||||
rb := &redirectBuffer{
|
logging.file[s] = w
|
||||||
w: w,
|
|
||||||
}
|
|
||||||
logging.file[s] = rb
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
|
panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
|
||||||
}
|
}
|
||||||
rb := &redirectBuffer{
|
logging.file[sev] = w
|
||||||
w: w,
|
|
||||||
}
|
|
||||||
logging.file[sev] = rb
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
|
// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
|
||||||
@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) {
|
|||||||
logExitFunc(err)
|
logExitFunc(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
l.flushAll()
|
needToSync := l.flushAll()
|
||||||
|
l.syncAll(needToSync)
|
||||||
OsExit(2)
|
OsExit(2)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1028,10 +999,6 @@ type syncBuffer struct {
|
|||||||
maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
|
maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sb *syncBuffer) Sync() error {
|
|
||||||
return sb.file.Sync()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
|
// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
|
||||||
func CalculateMaxSize() uint64 {
|
func CalculateMaxSize() uint64 {
|
||||||
if logging.logFile != "" {
|
if logging.logFile != "" {
|
||||||
@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
|
|||||||
// lockAndFlushAll is like flushAll but locks l.mu first.
|
// lockAndFlushAll is like flushAll but locks l.mu first.
|
||||||
func (l *loggingT) lockAndFlushAll() {
|
func (l *loggingT) lockAndFlushAll() {
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
l.flushAll()
|
needToSync := l.flushAll()
|
||||||
l.mu.Unlock()
|
l.mu.Unlock()
|
||||||
|
// Some environments are slow when syncing and holding the lock might cause contention.
|
||||||
|
l.syncAll(needToSync)
|
||||||
}
|
}
|
||||||
|
|
||||||
// flushAll flushes all the logs and attempts to "sync" their data to disk.
|
// flushAll flushes all the logs
|
||||||
// l.mu is held.
|
// l.mu is held.
|
||||||
func (l *loggingT) flushAll() {
|
//
|
||||||
|
// The result is the number of files which need to be synced and the pointers to them.
|
||||||
|
func (l *loggingT) flushAll() fileArray {
|
||||||
|
var needToSync fileArray
|
||||||
|
|
||||||
// Flush from fatal down, in case there's trouble flushing.
|
// Flush from fatal down, in case there's trouble flushing.
|
||||||
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
|
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
|
||||||
file := l.file[s]
|
file := l.file[s]
|
||||||
if file != nil {
|
if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
|
||||||
_ = file.Flush() // ignore error
|
_ = sb.Flush() // ignore error
|
||||||
_ = file.Sync() // ignore error
|
needToSync.files[needToSync.num] = sb.file
|
||||||
|
needToSync.num++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if logging.loggerOptions.flush != nil {
|
if logging.loggerOptions.flush != nil {
|
||||||
logging.loggerOptions.flush()
|
logging.loggerOptions.flush()
|
||||||
}
|
}
|
||||||
|
return needToSync
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileArray struct {
|
||||||
|
num int
|
||||||
|
files [severity.NumSeverity]*os.File
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncAll attempts to "sync" their data to disk.
|
||||||
|
func (l *loggingT) syncAll(needToSync fileArray) {
|
||||||
|
// Flush from fatal down, in case there's trouble flushing.
|
||||||
|
for i := 0; i < needToSync.num; i++ {
|
||||||
|
_ = needToSync.files[i].Sync() // ignore error
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyStandardLogTo arranges for messages written to the Go "log" package's
|
// CopyStandardLogTo arranges for messages written to the Go "log" package's
|
||||||
|
195
api/vendor/k8s.io/utils/net/multi_listen.go
generated
vendored
Normal file
195
api/vendor/k8s.io/utils/net/multi_listen.go
generated
vendored
Normal file
@ -0,0 +1,195 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package net
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// connErrPair pairs conn and error which is returned by accept on sub-listeners.
|
||||||
|
type connErrPair struct {
|
||||||
|
conn net.Conn
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// multiListener implements net.Listener
|
||||||
|
type multiListener struct {
|
||||||
|
listeners []net.Listener
|
||||||
|
wg sync.WaitGroup
|
||||||
|
|
||||||
|
// connCh passes accepted connections, from child listeners to parent.
|
||||||
|
connCh chan connErrPair
|
||||||
|
// stopCh communicates from parent to child listeners.
|
||||||
|
stopCh chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// compile time check to ensure *multiListener implements net.Listener
|
||||||
|
var _ net.Listener = &multiListener{}
|
||||||
|
|
||||||
|
// MultiListen returns net.Listener which can listen on and accept connections for
|
||||||
|
// the given network on multiple addresses. Internally it uses stdlib to create
|
||||||
|
// sub-listener and multiplexes connection requests using go-routines.
|
||||||
|
// The network must be "tcp", "tcp4" or "tcp6".
|
||||||
|
// It follows the semantics of net.Listen that primarily means:
|
||||||
|
// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen
|
||||||
|
// listens on all available unicast and anycast IP addresses of the local system.
|
||||||
|
// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively.
|
||||||
|
// 3. The host can accept names (e.g, localhost) and it will create a listener for at
|
||||||
|
// most one of the host's IP.
|
||||||
|
func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) {
|
||||||
|
var lc net.ListenConfig
|
||||||
|
return multiListen(
|
||||||
|
ctx,
|
||||||
|
network,
|
||||||
|
addrs,
|
||||||
|
func(ctx context.Context, network, address string) (net.Listener, error) {
|
||||||
|
return lc.Listen(ctx, network, address)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// multiListen implements MultiListen by consuming stdlib functions as dependency allowing
|
||||||
|
// mocking for unit-testing.
|
||||||
|
func multiListen(
|
||||||
|
ctx context.Context,
|
||||||
|
network string,
|
||||||
|
addrs []string,
|
||||||
|
listenFunc func(ctx context.Context, network, address string) (net.Listener, error),
|
||||||
|
) (net.Listener, error) {
|
||||||
|
if !(network == "tcp" || network == "tcp4" || network == "tcp6") {
|
||||||
|
return nil, fmt.Errorf("network %q not supported", network)
|
||||||
|
}
|
||||||
|
if len(addrs) == 0 {
|
||||||
|
return nil, fmt.Errorf("no address provided to listen on")
|
||||||
|
}
|
||||||
|
|
||||||
|
ml := &multiListener{
|
||||||
|
connCh: make(chan connErrPair),
|
||||||
|
stopCh: make(chan struct{}),
|
||||||
|
}
|
||||||
|
for _, addr := range addrs {
|
||||||
|
l, err := listenFunc(ctx, network, addr)
|
||||||
|
if err != nil {
|
||||||
|
// close all the sub-listeners and exit
|
||||||
|
_ = ml.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ml.listeners = append(ml.listeners, l)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, l := range ml.listeners {
|
||||||
|
ml.wg.Add(1)
|
||||||
|
go func(l net.Listener) {
|
||||||
|
defer ml.wg.Done()
|
||||||
|
for {
|
||||||
|
// Accept() is blocking, unless ml.Close() is called, in which
|
||||||
|
// case it will return immediately with an error.
|
||||||
|
conn, err := l.Accept()
|
||||||
|
// This assumes that ANY error from Accept() will terminate the
|
||||||
|
// sub-listener. We could maybe be more precise, but it
|
||||||
|
// doesn't seem necessary.
|
||||||
|
terminate := err != nil
|
||||||
|
|
||||||
|
select {
|
||||||
|
case ml.connCh <- connErrPair{conn: conn, err: err}:
|
||||||
|
case <-ml.stopCh:
|
||||||
|
// In case we accepted a connection AND were stopped, and
|
||||||
|
// this select-case was chosen, just throw away the
|
||||||
|
// connection. This avoids potentially blocking on connCh
|
||||||
|
// or leaking a connection.
|
||||||
|
if conn != nil {
|
||||||
|
_ = conn.Close()
|
||||||
|
}
|
||||||
|
terminate = true
|
||||||
|
}
|
||||||
|
// Make sure we don't loop on Accept() returning an error and
|
||||||
|
// the select choosing the channel case.
|
||||||
|
if terminate {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(l)
|
||||||
|
}
|
||||||
|
return ml, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept implements net.Listener. It waits for and returns a connection from
|
||||||
|
// any of the sub-listener.
|
||||||
|
func (ml *multiListener) Accept() (net.Conn, error) {
|
||||||
|
// wait for any sub-listener to enqueue an accepted connection
|
||||||
|
connErr, ok := <-ml.connCh
|
||||||
|
if !ok {
|
||||||
|
// The channel will be closed only when Close() is called on the
|
||||||
|
// multiListener. Closing of this channel implies that all
|
||||||
|
// sub-listeners are also closed, which causes a "use of closed
|
||||||
|
// network connection" error on their Accept() calls. We return the
|
||||||
|
// same error for multiListener.Accept() if multiListener.Close()
|
||||||
|
// has already been called.
|
||||||
|
return nil, fmt.Errorf("use of closed network connection")
|
||||||
|
}
|
||||||
|
return connErr.conn, connErr.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close implements net.Listener. It will close all sub-listeners and wait for
|
||||||
|
// the go-routines to exit.
|
||||||
|
func (ml *multiListener) Close() error {
|
||||||
|
// Make sure this can be called repeatedly without explosions.
|
||||||
|
select {
|
||||||
|
case <-ml.stopCh:
|
||||||
|
return fmt.Errorf("use of closed network connection")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tell all sub-listeners to stop.
|
||||||
|
close(ml.stopCh)
|
||||||
|
|
||||||
|
// Closing the listeners causes Accept() to immediately return an error in
|
||||||
|
// the sub-listener go-routines.
|
||||||
|
for _, l := range ml.listeners {
|
||||||
|
_ = l.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all the sub-listener go-routines to exit.
|
||||||
|
ml.wg.Wait()
|
||||||
|
close(ml.connCh)
|
||||||
|
|
||||||
|
// Drain any already-queued connections.
|
||||||
|
for connErr := range ml.connCh {
|
||||||
|
if connErr.conn != nil {
|
||||||
|
_ = connErr.conn.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Addr is an implementation of the net.Listener interface. It always returns
|
||||||
|
// the address of the first listener. Callers should use conn.LocalAddr() to
|
||||||
|
// obtain the actual local address of the sub-listener.
|
||||||
|
func (ml *multiListener) Addr() net.Addr {
|
||||||
|
return ml.listeners[0].Addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Addrs is like Addr, but returns the address for all registered listeners.
|
||||||
|
func (ml *multiListener) Addrs() []net.Addr {
|
||||||
|
var ret []net.Addr
|
||||||
|
for _, l := range ml.listeners {
|
||||||
|
ret = append(ret, l.Addr())
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
10
api/vendor/k8s.io/utils/ptr/OWNERS
generated
vendored
Normal file
10
api/vendor/k8s.io/utils/ptr/OWNERS
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
# See the OWNERS docs at https://go.k8s.io/owners
|
||||||
|
|
||||||
|
approvers:
|
||||||
|
- apelisse
|
||||||
|
- stewart-yu
|
||||||
|
- thockin
|
||||||
|
reviewers:
|
||||||
|
- apelisse
|
||||||
|
- stewart-yu
|
||||||
|
- thockin
|
3
api/vendor/k8s.io/utils/ptr/README.md
generated
vendored
Normal file
3
api/vendor/k8s.io/utils/ptr/README.md
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# Pointer
|
||||||
|
|
||||||
|
This package provides some functions for pointer-based operations.
|
73
api/vendor/k8s.io/utils/ptr/ptr.go
generated
vendored
Normal file
73
api/vendor/k8s.io/utils/ptr/ptr.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ptr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when,
|
||||||
|
// for example, an API struct is handled by plugins which need to distinguish
|
||||||
|
// "no plugin accepted this spec" from "this spec is empty".
|
||||||
|
//
|
||||||
|
// This function is only valid for structs and pointers to structs. Any other
|
||||||
|
// type will cause a panic. Passing a typed nil pointer will return true.
|
||||||
|
func AllPtrFieldsNil(obj interface{}) bool {
|
||||||
|
v := reflect.ValueOf(obj)
|
||||||
|
if !v.IsValid() {
|
||||||
|
panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj))
|
||||||
|
}
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
if v.IsNil() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
for i := 0; i < v.NumField(); i++ {
|
||||||
|
if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// To returns a pointer to the given value.
|
||||||
|
func To[T any](v T) *T {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deref dereferences ptr and returns the value it points to if no nil, or else
|
||||||
|
// returns def.
|
||||||
|
func Deref[T any](ptr *T, def T) T {
|
||||||
|
if ptr != nil {
|
||||||
|
return *ptr
|
||||||
|
}
|
||||||
|
return def
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal returns true if both arguments are nil or both arguments
|
||||||
|
// dereference to the same value.
|
||||||
|
func Equal[T comparable](a, b *T) bool {
|
||||||
|
if (a == nil) != (b == nil) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if a == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return *a == *b
|
||||||
|
}
|
27
api/vendor/modules.txt
vendored
27
api/vendor/modules.txt
vendored
@ -1,10 +1,13 @@
|
|||||||
# github.com/davecgh/go-spew v1.1.1
|
# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||||
## explicit
|
## explicit
|
||||||
github.com/davecgh/go-spew/spew
|
github.com/davecgh/go-spew/spew
|
||||||
|
# github.com/fxamacker/cbor/v2 v2.7.0
|
||||||
|
## explicit; go 1.17
|
||||||
|
github.com/fxamacker/cbor/v2
|
||||||
# github.com/ghodss/yaml v1.0.0
|
# github.com/ghodss/yaml v1.0.0
|
||||||
## explicit
|
## explicit
|
||||||
github.com/ghodss/yaml
|
github.com/ghodss/yaml
|
||||||
# github.com/go-logr/logr v1.4.1
|
# github.com/go-logr/logr v1.4.2
|
||||||
## explicit; go 1.18
|
## explicit; go 1.18
|
||||||
github.com/go-logr/logr
|
github.com/go-logr/logr
|
||||||
# github.com/gogo/protobuf v1.3.2
|
# github.com/gogo/protobuf v1.3.2
|
||||||
@ -27,20 +30,23 @@ github.com/modern-go/reflect2
|
|||||||
# github.com/openshift/api v0.0.0-20240115183315-0793e918179d
|
# github.com/openshift/api v0.0.0-20240115183315-0793e918179d
|
||||||
## explicit; go 1.21
|
## explicit; go 1.21
|
||||||
github.com/openshift/api/security/v1
|
github.com/openshift/api/security/v1
|
||||||
# github.com/pmezard/go-difflib v1.0.0
|
# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
|
||||||
## explicit
|
## explicit
|
||||||
github.com/pmezard/go-difflib/difflib
|
github.com/pmezard/go-difflib/difflib
|
||||||
# github.com/stretchr/testify v1.9.0
|
# github.com/stretchr/testify v1.9.0
|
||||||
## explicit; go 1.17
|
## explicit; go 1.17
|
||||||
github.com/stretchr/testify/assert
|
github.com/stretchr/testify/assert
|
||||||
github.com/stretchr/testify/require
|
github.com/stretchr/testify/require
|
||||||
# golang.org/x/net v0.23.0
|
# github.com/x448/float16 v0.8.4
|
||||||
|
## explicit; go 1.11
|
||||||
|
github.com/x448/float16
|
||||||
|
# golang.org/x/net v0.26.0
|
||||||
## explicit; go 1.18
|
## explicit; go 1.18
|
||||||
golang.org/x/net/http/httpguts
|
golang.org/x/net/http/httpguts
|
||||||
golang.org/x/net/http2
|
golang.org/x/net/http2
|
||||||
golang.org/x/net/http2/hpack
|
golang.org/x/net/http2/hpack
|
||||||
golang.org/x/net/idna
|
golang.org/x/net/idna
|
||||||
# golang.org/x/text v0.14.0
|
# golang.org/x/text v0.16.0
|
||||||
## explicit; go 1.18
|
## explicit; go 1.18
|
||||||
golang.org/x/text/secure/bidirule
|
golang.org/x/text/secure/bidirule
|
||||||
golang.org/x/text/transform
|
golang.org/x/text/transform
|
||||||
@ -55,12 +61,12 @@ gopkg.in/yaml.v2
|
|||||||
# gopkg.in/yaml.v3 v3.0.1
|
# gopkg.in/yaml.v3 v3.0.1
|
||||||
## explicit
|
## explicit
|
||||||
gopkg.in/yaml.v3
|
gopkg.in/yaml.v3
|
||||||
# k8s.io/api v0.30.3
|
# k8s.io/api v0.31.0
|
||||||
## explicit; go 1.22.0
|
## explicit; go 1.22.0
|
||||||
k8s.io/api/core/v1
|
k8s.io/api/core/v1
|
||||||
k8s.io/api/rbac/v1
|
k8s.io/api/rbac/v1
|
||||||
k8s.io/api/storage/v1
|
k8s.io/api/storage/v1
|
||||||
# k8s.io/apimachinery v0.30.3
|
# k8s.io/apimachinery v0.31.0
|
||||||
## explicit; go 1.22.0
|
## explicit; go 1.22.0
|
||||||
k8s.io/apimachinery/pkg/api/resource
|
k8s.io/apimachinery/pkg/api/resource
|
||||||
k8s.io/apimachinery/pkg/apis/meta/v1
|
k8s.io/apimachinery/pkg/apis/meta/v1
|
||||||
@ -70,6 +76,8 @@ k8s.io/apimachinery/pkg/fields
|
|||||||
k8s.io/apimachinery/pkg/labels
|
k8s.io/apimachinery/pkg/labels
|
||||||
k8s.io/apimachinery/pkg/runtime
|
k8s.io/apimachinery/pkg/runtime
|
||||||
k8s.io/apimachinery/pkg/runtime/schema
|
k8s.io/apimachinery/pkg/runtime/schema
|
||||||
|
k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct
|
||||||
|
k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes
|
||||||
k8s.io/apimachinery/pkg/selection
|
k8s.io/apimachinery/pkg/selection
|
||||||
k8s.io/apimachinery/pkg/types
|
k8s.io/apimachinery/pkg/types
|
||||||
k8s.io/apimachinery/pkg/util/errors
|
k8s.io/apimachinery/pkg/util/errors
|
||||||
@ -83,7 +91,7 @@ k8s.io/apimachinery/pkg/util/validation
|
|||||||
k8s.io/apimachinery/pkg/util/validation/field
|
k8s.io/apimachinery/pkg/util/validation/field
|
||||||
k8s.io/apimachinery/pkg/watch
|
k8s.io/apimachinery/pkg/watch
|
||||||
k8s.io/apimachinery/third_party/forked/golang/reflect
|
k8s.io/apimachinery/third_party/forked/golang/reflect
|
||||||
# k8s.io/klog/v2 v2.120.1
|
# k8s.io/klog/v2 v2.130.1
|
||||||
## explicit; go 1.18
|
## explicit; go 1.18
|
||||||
k8s.io/klog/v2
|
k8s.io/klog/v2
|
||||||
k8s.io/klog/v2/internal/buffer
|
k8s.io/klog/v2/internal/buffer
|
||||||
@ -92,10 +100,11 @@ k8s.io/klog/v2/internal/dbg
|
|||||||
k8s.io/klog/v2/internal/serialize
|
k8s.io/klog/v2/internal/serialize
|
||||||
k8s.io/klog/v2/internal/severity
|
k8s.io/klog/v2/internal/severity
|
||||||
k8s.io/klog/v2/internal/sloghandler
|
k8s.io/klog/v2/internal/sloghandler
|
||||||
# k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||||
## explicit; go 1.18
|
## explicit; go 1.18
|
||||||
k8s.io/utils/internal/third_party/forked/golang/net
|
k8s.io/utils/internal/third_party/forked/golang/net
|
||||||
k8s.io/utils/net
|
k8s.io/utils/net
|
||||||
|
k8s.io/utils/ptr
|
||||||
k8s.io/utils/strings/slices
|
k8s.io/utils/strings/slices
|
||||||
# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
|
# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
|
||||||
## explicit; go 1.18
|
## explicit; go 1.18
|
||||||
|
Loading…
Reference in New Issue
Block a user