build: add yamlgen to build deployment files

This initial version of yamlgen generates deploy/scc.yaml based on the
deployment artifact that is provided by the new api/deploy/ocp package.

Signed-off-by: Niels de Vos <ndevos@redhat.com>
Author: Niels de Vos, 2021-10-01 14:08:56 +02:00 (committed by mergify[bot])
parent 36e099d939
commit 5ea99fdd5b
32 changed files with 8850 additions and 2 deletions


@ -163,8 +163,13 @@ cephcsi: check-env
e2e.test: check-env
go test $(GO_TAGS) -mod=vendor -c ./e2e
deploy/scc.yaml: api/deploy/ocp/scc.go
$(MAKE) -C tools generate-deploy
#
# Update the generated deploy/ files when the templates change. This requires
# running 'go mod vendor' to update the API files under the vendor/ directory.
.PHONY: generate-deploy
generate-deploy:
go mod vendor
$(MAKE) -C deploy
#
# e2e testing by compiling e2e.test in case it does not exist and running the

deploy/Makefile Normal file

@ -0,0 +1,19 @@
# Copyright 2021 The Ceph-CSI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: all
all: scc.yaml
scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go
$(MAKE) -C ../tools generate-deploy

deploy/scc.yaml Normal file

@ -0,0 +1,51 @@
---
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: "ceph-csi"
# To allow running privilegedContainers
allowPrivilegedContainer: true
# CSI daemonset pod needs hostnetworking
allowHostNetwork: true
# This needs to be set to true as we use HostPath
allowHostDirVolumePlugin: true
priority:
# SYS_ADMIN is needed for rbd to execute the rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run liveness container on daemonset pods
allowHostPorts: true
# Needed as we are setting this in RBD plugin pod
allowHostPID: true
# Required for encryption
allowHostIPC: true
# Set to false as we write to RootFilesystem inside csi containers
readOnlyRootFilesystem: false
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
fsGroup:
type: RunAsAny
supplementalGroups:
type: RunAsAny
# The type of volumes which are mounted to csi pods
volumes:
- configMap
- projected
- emptyDir
- hostPath
users:
# A user needs to be added for each service account.
- "system:serviceaccount:ceph-csi:csi-rbd-plugin-sa"
- "system:serviceaccount:ceph-csi:csi-rbd-provisioner-sa"
- "system:serviceaccount:ceph-csi:csi-cephfs-plugin-sa"
# yamllint disable-line rule:line-length
- "system:serviceaccount:ceph-csi:csi-cephfs-provisioner-sa"

go.mod

@ -4,6 +4,7 @@ go 1.16
require (
github.com/aws/aws-sdk-go v1.40.50
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.11.0
github.com/container-storage-interface/spec v1.5.0
github.com/csi-addons/replication-lib-utils v0.2.0
@ -37,6 +38,7 @@ require (
replace (
code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f
github.com/ceph/ceph-csi/api => ./api
github.com/golang/protobuf => github.com/golang/protobuf v1.4.3
github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b
github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3

go.sum

@ -241,6 +241,11 @@ github.com/csi-addons/spec v0.1.1 h1:Bm9ZVCQ+nYMs7Y5PK+izkzCeer262W4rjCyGpuqu9C4
github.com/csi-addons/spec v0.1.1/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -315,6 +320,7 @@ github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE
github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
@ -421,6 +427,7 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@ -807,7 +814,10 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.m
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20210927171657-636513e97fda h1:VoJmrqbFDuqzjlByItbjx/HxmReK4LC+X3Jt2Wv2Ogs=
github.com/openshift/api v0.0.0-20210927171657-636513e97fda/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8=
github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
@ -912,6 +922,7 @@ github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY=
github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
@ -1057,6 +1068,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1197,6 +1209,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1345,6 +1358,7 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -1506,6 +1520,7 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76
gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=

tools/Makefile Normal file

@ -0,0 +1,17 @@
# Copyright 2021 The Ceph-CSI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: generate-deploy
generate-deploy: yamlgen/main.go
go run yamlgen/main.go

tools/README.md Normal file

@ -0,0 +1,8 @@
# Assorted Tools for maintaining and building Ceph-CSI
## `yamlgen`
`yamlgen` reads deployment configurations from the `api/` package and generates
YAML files that can be used for deploying without advanced automation like
Rook. The generated files are located under `deploy/`.
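
To illustrate what the README describes, a consumer of the `api/` package could render the same SCC document that `yamlgen` writes to `deploy/scc.yaml`. This is only a sketch using the functions introduced in this commit; it is not itself part of the repository:

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/api/deploy/ocp"
)

func main() {
	// Render the SecurityContextConstraints with the built-in defaults
	// (namespace "ceph-csi", no deployer prefix), much like yamlgen does.
	yaml, err := ocp.NewSecurityContextConstraintsYAML(ocp.SecurityContextConstraintsDefaults)
	if err != nil {
		panic(err)
	}
	fmt.Print(yaml)
}
```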

tools/yamlgen/main.go Normal file

@ -0,0 +1,83 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"github.com/ceph/ceph-csi/api/deploy/ocp"
)
const header = `---
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
`
type deploymentArtifact struct {
filename string
// FIXME: This is not dynamic enough for additional YAML generating
// functions. Need to look into typecasting the functions and passing
// interface{} instead of ocp.SecurityContextConstraintsValues.
yamlFunc func(ocp.SecurityContextConstraintsValues) (string, error)
defaults ocp.SecurityContextConstraintsValues
}
var yamlArtifacts = []deploymentArtifact{
{
"../deploy/scc.yaml",
ocp.NewSecurityContextConstraintsYAML,
ocp.SecurityContextConstraintsDefaults,
},
}
func main() {
for _, artifact := range yamlArtifacts {
writeArtifact(artifact)
}
}
func writeArtifact(artifact deploymentArtifact) {
fmt.Printf("creating %q...", artifact.filename)
f, err := os.Create(artifact.filename)
if err != nil {
panic(fmt.Sprintf("failed to create file %q: %v", artifact.filename, err))
}
_, err = f.WriteString(header)
if err != nil {
panic(fmt.Sprintf("failed to write header to %q: %v", artifact.filename, err))
}
data, err := artifact.yamlFunc(artifact.defaults)
if err != nil {
panic(fmt.Sprintf("failed to generate YAML for %q: %v", artifact.filename, err))
}
_, err = f.WriteString(data)
if err != nil {
panic(fmt.Sprintf("failed to write contents to %q: %v", artifact.filename, err))
}
fmt.Println("done!")
}
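
The FIXME above notes that `deploymentArtifact.yamlFunc` is tied to `ocp.SecurityContextConstraintsValues`. One possible shape for the `interface{}`-based generalization it hints at is sketched below; the `artifact` type and the wrapper closure are hypothetical and not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/api/deploy/ocp"
)

// artifact is a hypothetical, untyped variant of deploymentArtifact: the
// generator accepts interface{} so templates with other value types could be
// added without changing the struct definition.
type artifact struct {
	filename string
	yamlFunc func(interface{}) (string, error)
	defaults interface{}
}

var artifacts = []artifact{
	{
		filename: "../deploy/scc.yaml",
		// Wrap the strongly-typed API call so it fits the untyped signature.
		yamlFunc: func(v interface{}) (string, error) {
			values, ok := v.(ocp.SecurityContextConstraintsValues)
			if !ok {
				return "", fmt.Errorf("unexpected values type %T", v)
			}
			return ocp.NewSecurityContextConstraintsYAML(values)
		},
		defaults: ocp.SecurityContextConstraintsDefaults,
	},
}

func main() {
	for _, a := range artifacts {
		data, err := a.yamlFunc(a.defaults)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d bytes of YAML generated\n", a.filename, len(data))
	}
}
```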

vendor/github.com/ceph/ceph-csi/api/deploy/ocp/doc.go generated vendored Normal file

@ -0,0 +1,20 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ocp contains functions to obtain standard and recommended
// deployment artifacts for OpenShift. These artifacts can be used by
// automation tools that want to deploy Ceph-CSI.
package ocp

vendor/github.com/ceph/ceph-csi/api/deploy/ocp/scc.go generated vendored Normal file

@ -0,0 +1,107 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ocp
import (
"bytes"
_ "embed"
"fmt"
"text/template"
"github.com/ghodss/yaml"
secv1 "github.com/openshift/api/security/v1"
)
//go:embed scc.yaml
var securityContextConstraints string
// SecurityContextConstraintsValues contains values that need replacing in the
// template.
type SecurityContextConstraintsValues struct {
// Namespace contains the OpenShift Namespace where the SCC will be
// used.
Namespace string
// Deployer refers to the Operator that creates the SCC and
// ServiceAccounts. This is optional.
Deployer string
}
// SecurityContextConstraintsDefaults can be used for generating deployment
// artifacts with default values.
var SecurityContextConstraintsDefaults = SecurityContextConstraintsValues{
Namespace: "ceph-csi",
Deployer: "",
}
// NewSecurityContextConstraints creates a new SecurityContextConstraints
// object by replacing variables in the template by the values set in the
// SecurityContextConstraintsValues.
//
// The deployer parameter (when not an empty string) is used as a prefix for
// the name of the SCC and the linked ServiceAccounts.
func NewSecurityContextConstraints(values SecurityContextConstraintsValues) (*secv1.SecurityContextConstraints, error) {
data, err := NewSecurityContextConstraintsYAML(values)
if err != nil {
return nil, err
}
scc := &secv1.SecurityContextConstraints{}
err = yaml.Unmarshal([]byte(data), scc)
if err != nil {
return nil, fmt.Errorf("failed convert YAML to %T: %w", scc, err)
}
return scc, nil
}
// internalSecurityContextConstraintsValues extends
// SecurityContextConstraintsValues with some private attributes that may get
// set based on other values.
type internalSecurityContextConstraintsValues struct {
SecurityContextConstraintsValues
// Prefix is based on SecurityContextConstraintsValues.Deployer.
Prefix string
}
// NewSecurityContextConstraintsYAML returns a YAML string where the variables
// in the template have been replaced by the values set in the
// SecurityContextConstraintsValues.
func NewSecurityContextConstraintsYAML(values SecurityContextConstraintsValues) (string, error) {
var buf bytes.Buffer
// internalValues is a copy of values, but will get extended with
// API-internal values
internalValues := internalSecurityContextConstraintsValues{
SecurityContextConstraintsValues: values,
}
if internalValues.Deployer != "" {
internalValues.Prefix = internalValues.Deployer + "-"
}
tmpl, err := template.New("SCC").Parse(securityContextConstraints)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
err = tmpl.Execute(&buf, internalValues)
if err != nil {
return "", fmt.Errorf("failed to replace values in template: %w", err)
}
return buf.String(), nil
}
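
As the doc comments above describe, setting Deployer adds a prefix to the SCC name and the ServiceAccount users. A minimal usage sketch follows; the "rook"/"rook-ceph" values are made-up examples, not defaults from this commit:

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/api/deploy/ocp"
)

func main() {
	values := ocp.SecurityContextConstraintsValues{
		Namespace: "rook-ceph", // namespace part of the ServiceAccount users
		Deployer:  "rook",      // turned into the "rook-" prefix
	}

	scc, err := ocp.NewSecurityContextConstraints(values)
	if err != nil {
		panic(err)
	}

	// Expected: "rook-ceph-csi" and users such as
	// "system:serviceaccount:rook-ceph:rook-csi-rbd-plugin-sa".
	fmt.Println(scc.Name)
	fmt.Println(scc.Users)
}
```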


@ -0,0 +1,43 @@
---
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: "{{ .Prefix }}ceph-csi"
# To allow running privilegedContainers
allowPrivilegedContainer: true
# CSI daemonset pod needs hostnetworking
allowHostNetwork: true
# This needs to be set to true as we use HostPath
allowHostDirVolumePlugin: true
priority:
# SYS_ADMIN is needed for rbd to execute the rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run liveness container on daemonset pods
allowHostPorts: true
# Needed as we are setting this in RBD plugin pod
allowHostPID: true
# Required for encryption
allowHostIPC: true
# Set to false as we write to RootFilesystem inside csi containers
readOnlyRootFilesystem: false
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
fsGroup:
type: RunAsAny
supplementalGroups:
type: RunAsAny
# The type of volumes which are mounted to csi pods
volumes:
- configMap
- projected
- emptyDir
- hostPath
users:
# A user needs to be added for each service account.
- "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-rbd-plugin-sa"
- "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-rbd-provisioner-sa"
- "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-cephfs-plugin-sa"
# yamllint disable-line rule:line-length
- "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-cephfs-provisioner-sa"

vendor/github.com/ghodss/yaml/.gitignore generated vendored Normal file

@ -0,0 +1,20 @@
# OSX leaves these everywhere on SMB shares
._*
# Eclipse files
.classpath
.project
.settings/**
# Emacs save files
*~
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Go test binaries
*.test

vendor/github.com/ghodss/yaml/.travis.yml generated vendored Normal file

@ -0,0 +1,8 @@
language: go
go:
- "1.9"
- "1.10"
- "1.11"
script:
- go test
- go build

vendor/github.com/ghodss/yaml/LICENSE generated vendored Normal file

@ -0,0 +1,50 @@
The MIT License (MIT)
Copyright (c) 2014 Sam Ghods
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/ghodss/yaml/README.md generated vendored Normal file

@ -0,0 +1,121 @@
# YAML marshaling and unmarshaling support for Go
[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
```
BAD:
exampleKey: !!binary gIGC
GOOD:
exampleKey: gIGC
... and decode the base64 data in your code.
```
**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
## Installation and usage
To install, run:
```
$ go get github.com/ghodss/yaml
```
And import using:
```
import "github.com/ghodss/yaml"
```
Usage is very similar to the JSON library:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
type Person struct {
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"age"`
}
func main() {
// Marshal a Person struct to YAML.
p := Person{"John", 30}
y, err := yaml.Marshal(p)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
age: 30
name: John
*/
// Unmarshal the YAML back into a Person struct.
var p2 Person
err = yaml.Unmarshal(y, &p2)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(p2)
/* Output:
{John 30}
*/
}
```
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
func main() {
j := []byte(`{"name": "John", "age": 30}`)
y, err := yaml.JSONToYAML(j)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
name: John
age: 30
*/
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
/* Output:
{"age":30,"name":"John"}
*/
}
```

vendor/github.com/ghodss/yaml/fields.go generated vendored Normal file

@ -0,0 +1,501 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package yaml
import (
"bytes"
"encoding"
"encoding/json"
"reflect"
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
if v.CanSet() {
v.Set(reflect.New(v.Type().Elem()))
} else {
v = reflect.New(v.Type().Elem())
}
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(json.Unmarshaler); ok {
return u, nil, reflect.Value{}
}
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
return nil, u, reflect.Value{}
}
}
v = v.Elem()
}
return nil, nil, v
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
tag bool
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
func fillField(f field) field {
f.nameBytes = []byte(f.name)
f.equalFold = foldFunc(f.nameBytes)
return f
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, fillField(field{
name: name,
tag: tagged,
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
quoted: opts.Contains("string"),
}))
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A '' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}

vendor/github.com/ghodss/yaml/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/ghodss/yaml
require gopkg.in/yaml.v2 v2.2.2

vendor/github.com/ghodss/yaml/go.sum generated vendored Normal file

@ -0,0 +1,3 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

vendor/github.com/ghodss/yaml/yaml.go generated vendored Normal file

@ -0,0 +1,326 @@
// Package yaml provides a wrapper around go-yaml designed to enable a better
// way of handling YAML when marshaling to and from structs.
//
// In short, this package first converts YAML to JSON using go-yaml and then
// uses json.Marshal and json.Unmarshal to convert to or from the struct. This
// means that it effectively reuses the JSON struct tags as well as the custom
// JSON methods MarshalJSON and UnmarshalJSON unlike go-yaml.
//
// See also http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang
//
package yaml // import "github.com/ghodss/yaml"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strconv"
"gopkg.in/yaml.v2"
)
// Marshals the object into JSON then converts JSON to YAML and returns the
// YAML.
func Marshal(o interface{}) ([]byte, error) {
j, err := json.Marshal(o)
if err != nil {
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
}
y, err := JSONToYAML(j)
if err != nil {
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
}
return y, nil
}
// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder
// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
// optionally configuring the behavior of the JSON unmarshal.
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
return unmarshal(yaml.Unmarshal, y, o, opts)
}
// UnmarshalStrict is like Unmarshal except that any mapping keys that are
// duplicates will result in an error.
// To also be strict about unknown fields, add the DisallowUnknownFields option.
func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
return unmarshal(yaml.UnmarshalStrict, y, o, opts)
}
func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o interface{}, opts []JSONOpt) error {
vo := reflect.ValueOf(o)
j, err := yamlToJSON(y, &vo, f)
if err != nil {
return fmt.Errorf("error converting YAML to JSON: %v", err)
}
err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
if err != nil {
return fmt.Errorf("error unmarshaling JSON: %v", err)
}
return nil
}
// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
d := json.NewDecoder(r)
for _, opt := range opts {
d = opt(d)
}
if err := d.Decode(&o); err != nil {
return fmt.Errorf("while decoding JSON: %v", err)
}
return nil
}
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}, it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number type throughout this process.
err := yaml.Unmarshal(j, &jsonObj)
if err != nil {
return nil, err
}
// Marshal this object into YAML.
return yaml.Marshal(jsonObj)
}
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// * Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
//
// For strict decoding of YAML, use YAMLToJSONStrict.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil, yaml.Unmarshal)
}
// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
return yamlToJSON(y, nil, yaml.UnmarshalStrict)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
err := yamlUnmarshal(y, &yamlObj)
if err != nil {
return nil, err
}
// YAML objects are not completely compatible with JSON objects (e.g. you
// can have non-string keys in YAML). So, convert the YAML-compatible object
// to a JSON-compatible object, failing with an error if irrecoverable
// incompatibilities happen along the way.
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
if err != nil {
return nil, err
}
// Convert this object to JSON and return the data.
return json.Marshal(jsonObj)
}
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
var err error
// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
// interface). We pass decodingNull as false because we're not actually
// decoding into the value, we're just checking if the ultimate target is a
// string.
if jsonTarget != nil {
ju, tu, pv := indirect(*jsonTarget, false)
// We have a JSON or Text Unmarshaler at this level, so we can't be trying
// to decode into a string.
if ju != nil || tu != nil {
jsonTarget = nil
} else {
jsonTarget = &pv
}
}
// If yamlObj is a number or a boolean, check if jsonTarget is a string -
// if so, coerce. Else return normal.
// If yamlObj is a map or array, find the field that each key is
// unmarshaling to, and when you recurse pass the reflect.Value for that
// field back into this function.
switch typedYAMLObj := yamlObj.(type) {
case map[interface{}]interface{}:
// JSON does not support arbitrary keys in a map, so we must convert
// these keys to strings.
//
// From my reading of go-yaml v2 (specifically the resolve function),
// keys can only have the types string, int, int64, float64, binary
// (unsupported), or null (unsupported).
strMap := make(map[string]interface{})
for k, v := range typedYAMLObj {
// Resolve the key to a string first.
var keyString string
switch typedKey := k.(type) {
case string:
keyString = typedKey
case int:
keyString = strconv.Itoa(typedKey)
case int64:
// go-yaml will only return an int64 as a key if the system
// architecture is 32-bit and the key's value is between 32-bit
// and 64-bit. Otherwise the key type will simply be int.
keyString = strconv.FormatInt(typedKey, 10)
case float64:
// Stolen from go-yaml to use the same conversion to string as
// the go-yaml library uses to convert float to string when
// Marshaling.
s := strconv.FormatFloat(typedKey, 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
keyString = s
case bool:
if typedKey {
keyString = "true"
} else {
keyString = "false"
}
default:
return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
reflect.TypeOf(k), k, v)
}
// jsonTarget should be a struct or a map. If it's a struct, find
// the field it's going to map to and pass its reflect.Value. If
// it's a map, find the element type of the map and pass the
// reflect.Value created from that type. If it's neither, just pass
// nil - JSON conversion will error for us if it's a real issue.
if jsonTarget != nil {
t := *jsonTarget
if t.Kind() == reflect.Struct {
keyBytes := []byte(keyString)
// Find the field that the JSON library would use.
var f *field
fields := cachedTypeFields(t.Type())
for i := range fields {
ff := &fields[i]
if bytes.Equal(ff.nameBytes, keyBytes) {
f = ff
break
}
// Do case-insensitive comparison.
if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
f = ff
}
}
if f != nil {
// Find the reflect.Value of the most preferential
// struct field.
jtf := t.Field(f.index[0])
strMap[keyString], err = convertToJSONableObject(v, &jtf)
if err != nil {
return nil, err
}
continue
}
} else if t.Kind() == reflect.Map {
// Create a zero value of the map's element type to use as
// the JSON target.
jtv := reflect.Zero(t.Type().Elem())
strMap[keyString], err = convertToJSONableObject(v, &jtv)
if err != nil {
return nil, err
}
continue
}
}
strMap[keyString], err = convertToJSONableObject(v, nil)
if err != nil {
return nil, err
}
}
return strMap, nil
case []interface{}:
// We need to recurse into arrays in case there are any
// map[interface{}]interface{}'s inside and to convert any
// numbers to strings.
// If jsonTarget is a slice (which it really should be), find the
// thing it's going to map to. If it's not a slice, just pass nil
// - JSON conversion will error for us if it's a real issue.
var jsonSliceElemValue *reflect.Value
if jsonTarget != nil {
t := *jsonTarget
if t.Kind() == reflect.Slice {
// By default slices point to nil, but we need a reflect.Value
// pointing to a value of the slice type, so we create one here.
ev := reflect.Indirect(reflect.New(t.Type().Elem()))
jsonSliceElemValue = &ev
}
}
// Make and use a new array.
arr := make([]interface{}, len(typedYAMLObj))
for i, v := range typedYAMLObj {
arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
if err != nil {
return nil, err
}
}
return arr, nil
default:
// If the target type is a string and the YAML type is a number,
// convert the YAML type to a string.
if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
// Based on my reading of go-yaml, it may return int, int64,
// float64, or uint64.
var s string
switch typedVal := typedYAMLObj.(type) {
case int:
s = strconv.FormatInt(int64(typedVal), 10)
case int64:
s = strconv.FormatInt(typedVal, 10)
case float64:
s = strconv.FormatFloat(typedVal, 'g', -1, 32)
case uint64:
s = strconv.FormatUint(typedVal, 10)
case bool:
if typedVal {
s = "true"
} else {
s = "false"
}
}
if len(s) > 0 {
yamlObj = interface{}(s)
}
}
return yamlObj, nil
}
return nil, nil
}

vendor/github.com/ghodss/yaml/yaml_go110.go generated vendored Normal file

@ -0,0 +1,14 @@
// This file contains changes that are only compatible with go 1.10 and onwards.
// +build go1.10
package yaml
import "encoding/json"
// DisallowUnknownFields configures the JSON decoder to error out if unknown
// fields come along, instead of dropping them by default.
func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
d.DisallowUnknownFields()
return d
}

vendor/github.com/openshift/api/LICENSE generated vendored Normal file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,279 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.openshift.io: https://github.com/openshift/api/pull/470
include.release.openshift.io/ibm-cloud-managed: "true"
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
name: securitycontextconstraints.security.openshift.io
spec:
group: security.openshift.io
names:
kind: SecurityContextConstraints
listKind: SecurityContextConstraintsList
plural: securitycontextconstraints
singular: securitycontextconstraints
scope: Cluster
versions:
- additionalPrinterColumns:
- description: Determines if a container can request to be run as privileged
jsonPath: .allowPrivilegedContainer
name: Priv
type: string
- description: A list of capabilities that can be requested to add to the container
jsonPath: .allowedCapabilities
name: Caps
type: string
- description: Strategy that will dictate what labels will be set in the SecurityContext
jsonPath: .seLinuxContext.type
name: SELinux
type: string
- description: Strategy that will dictate what RunAsUser is used in the SecurityContext
jsonPath: .runAsUser.type
name: RunAsUser
type: string
- description: Strategy that will dictate what fs group is used by the SecurityContext
jsonPath: .fsGroup.type
name: FSGroup
type: string
- description: Strategy that will dictate what supplemental groups are used by the SecurityContext
jsonPath: .supplementalGroups.type
name: SupGroup
type: string
- description: Sort order of SCCs
jsonPath: .priority
name: Priority
type: string
- description: Force containers to run with a read only root file system
jsonPath: .readOnlyRootFilesystem
name: ReadOnlyRootFS
type: string
- description: White list of allowed volume plugins
jsonPath: .volumes
name: Volumes
type: string
name: v1
schema:
openAPIV3Schema:
description: "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)."
type: object
required:
- allowHostDirVolumePlugin
- allowHostIPC
- allowHostNetwork
- allowHostPID
- allowHostPorts
- allowPrivilegedContainer
- allowedCapabilities
- defaultAddCapabilities
- priority
- readOnlyRootFilesystem
- requiredDropCapabilities
- volumes
properties:
allowHostDirVolumePlugin:
description: AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin
type: boolean
allowHostIPC:
description: AllowHostIPC determines if the policy allows host ipc in the containers.
type: boolean
allowHostNetwork:
description: AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
type: boolean
allowHostPID:
description: AllowHostPID determines if the policy allows host pid in the containers.
type: boolean
allowHostPorts:
description: AllowHostPorts determines if the policy allows host ports in the containers.
type: boolean
allowPrivilegeEscalation:
description: AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.
type: boolean
nullable: true
allowPrivilegedContainer:
description: AllowPrivilegedContainer determines if a container can request to be run as privileged.
type: boolean
allowedCapabilities:
description: AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.
type: array
items:
description: Capability represent POSIX capabilities type
type: string
nullable: true
allowedFlexVolumes:
description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field.
type: array
items:
description: AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type: object
required:
- driver
properties:
driver:
description: Driver is the name of the Flexvolume driver.
type: string
nullable: true
allowedUnsafeSysctls:
description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc."
type: array
items:
type: string
nullable: true
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
defaultAddCapabilities:
description: DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.
type: array
items:
description: Capability represent POSIX capabilities type
type: string
nullable: true
defaultAllowPrivilegeEscalation:
description: DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.
type: boolean
nullable: true
forbiddenSysctls:
description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc."
type: array
items:
type: string
nullable: true
fsGroup:
description: FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
type: object
properties:
ranges:
description: Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.
type: array
items:
description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.'
type: object
properties:
max:
description: Max is the end of the range, inclusive.
type: integer
format: int64
min:
description: Min is the start of the range, inclusive.
type: integer
format: int64
type:
description: Type is the strategy that will dictate what FSGroup is used in the SecurityContext.
type: string
nullable: true
groups:
description: The groups that have permission to use this security context constraints
type: array
items:
type: string
nullable: true
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
priority:
description: Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.
type: integer
format: int32
nullable: true
readOnlyRootFilesystem:
description: ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.
type: boolean
requiredDropCapabilities:
description: RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.
type: array
items:
description: Capability represent POSIX capabilities type
type: string
nullable: true
runAsUser:
description: RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.
type: object
properties:
type:
description: Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.
type: string
uid:
description: UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.
type: integer
format: int64
uidRangeMax:
description: UIDRangeMax defines the max value for a strategy that allocates by range.
type: integer
format: int64
uidRangeMin:
description: UIDRangeMin defines the min value for a strategy that allocates by range.
type: integer
format: int64
nullable: true
seLinuxContext:
description: SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.
type: object
properties:
seLinuxOptions:
description: seLinuxOptions required to run as; required for MustRunAs
type: object
properties:
level:
description: Level is SELinux level label that applies to the container.
type: string
role:
description: Role is a SELinux role label that applies to the container.
type: string
type:
description: Type is a SELinux type label that applies to the container.
type: string
user:
description: User is a SELinux user label that applies to the container.
type: string
type:
description: Type is the strategy that will dictate what SELinux context is used in the SecurityContext.
type: string
nullable: true
seccompProfiles:
description: "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default."
type: array
items:
type: string
nullable: true
supplementalGroups:
description: SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
type: object
properties:
ranges:
description: Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.
type: array
items:
description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.'
type: object
properties:
max:
description: Max is the end of the range, inclusive.
type: integer
format: int64
min:
description: Min is the start of the range, inclusive.
type: integer
format: int64
type:
description: Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.
type: string
nullable: true
users:
description: The users who have permissions to use this security context constraints
type: array
items:
type: string
nullable: true
volumes:
description: Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". To allow no volumes, set to ["none"].
type: array
items:
description: FS Type gives strong typing to different file systems that are used by volumes.
type: string
nullable: true
served: true
storage: true
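
The CRD above only publishes the schema; a concrete SecurityContextConstraints object, such as the scc.yaml that Ceph-CSI generates, has to carry every field in the required list. As a rough sketch (not the code added by this commit; the values and the use of sigs.k8s.io/yaml are illustrative assumptions), the vendored Go types can be populated and marshalled to YAML like this:

package main

import (
	"fmt"

	secv1 "github.com/openshift/api/security/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	scc := secv1.SecurityContextConstraints{
		TypeMeta: metav1.TypeMeta{
			Kind:       "SecurityContextConstraints",
			APIVersion: secv1.GroupVersion.String(), // security.openshift.io/v1
		},
		ObjectMeta: metav1.ObjectMeta{Name: "ceph-csi"},
		// The fields below are the ones the CRD schema marks as required.
		AllowHostDirVolumePlugin: true,
		AllowHostIPC:             true,
		AllowHostNetwork:         true,
		AllowHostPID:             true,
		AllowHostPorts:           true,
		AllowPrivilegedContainer: true,
		AllowedCapabilities:      []corev1.Capability{"SYS_ADMIN"},
		DefaultAddCapabilities:   nil,
		RequiredDropCapabilities: nil,
		Priority:                 nil,
		ReadOnlyRootFilesystem:   false,
		Volumes: []secv1.FSType{
			secv1.FSTypeConfigMap,
			secv1.FSProjected,
			secv1.FSTypeEmptyDir,
			secv1.FSTypeHostPath,
		},
	}

	out, err := yaml.Marshal(&scc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}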

10 vendor/github.com/openshift/api/security/v1/consts.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package v1
const (
UIDRangeAnnotation = "openshift.io/sa.scc.uid-range"
// SupplementalGroupsAnnotation contains a comma delimited list of allocated supplemental groups
// for the namespace. Groups are in the form of a Block which supports {start}/{length} or {start}-{end}
SupplementalGroupsAnnotation = "openshift.io/sa.scc.supplemental-groups"
MCSAnnotation = "openshift.io/sa.scc.mcs"
ValidatedSCCAnnotation = "openshift.io/scc"
)
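
SupplementalGroupsAnnotation stores the namespace's allocated groups as comma-delimited blocks in {start}/{length} or {start}-{end} form. A hypothetical helper, not part of the vendored package, that expands one such block into an inclusive range could look like this (parseBlock and the example value are made up for illustration):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseBlock expands a single block such as "1000650000/10000" or
// "1000650000-1000659999" into an inclusive [min, max] range.
func parseBlock(block string) (int64, int64, error) {
	var sep string
	switch {
	case strings.Contains(block, "/"):
		sep = "/"
	case strings.Contains(block, "-"):
		sep = "-"
	default:
		return 0, 0, fmt.Errorf("unrecognized block %q", block)
	}

	parts := strings.SplitN(block, sep, 2)
	start, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, 0, err
	}
	second, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return 0, 0, err
	}
	if sep == "/" {
		// {start}/{length}: the length counts the start itself.
		return start, start + second - 1, nil
	}
	// {start}-{end}: both ends are inclusive.
	return start, second, nil
}

func main() {
	min, max, err := parseBlock("1000650000/10000")
	if err != nil {
		panic(err)
	}
	fmt.Println(min, max) // 1000650000 1000659999
}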

8 vendor/github.com/openshift/api/security/v1/doc.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/security/apis/security
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +groupName=security.openshift.io
// Package v1 is the v1 version of the API.
package v1

File diff suppressed because it is too large

View File

@@ -0,0 +1,372 @@
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = "proto2";
package github.com.openshift.api.security.v1;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
message AllowedFlexVolume {
// Driver is the name of the Flexvolume driver.
optional string driver = 1;
}
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
message FSGroupStrategyOptions {
// Type is the strategy that will dictate what FSGroup is used in the SecurityContext.
optional string type = 1;
// Ranges are the allowed ranges of fs groups. If you would like to force a single
// fs group then supply a single range with the same start and end.
repeated IDRange ranges = 2;
}
// IDRange provides a min/max of an allowed range of IDs.
// TODO: this could be reused for UIDs.
message IDRange {
// Min is the start of the range, inclusive.
optional int64 min = 1;
// Max is the end of the range, inclusive.
optional int64 max = 2;
}
// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message PodSecurityPolicyReview {
// spec is the PodSecurityPolicy to check.
optional PodSecurityPolicyReviewSpec spec = 1;
// status represents the current information/status for the PodSecurityPolicyReview.
optional PodSecurityPolicyReviewStatus status = 2;
}
// PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview
message PodSecurityPolicyReviewSpec {
// template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used
// if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty,
// in which case "default" is used.
// If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
optional k8s.io.api.core.v1.PodTemplateSpec template = 1;
// serviceAccountNames is an optional set of ServiceAccounts to run the check with.
// If serviceAccountNames is empty, the template.spec.serviceAccountName is used,
// unless it's empty, in which case "default" is used instead.
// If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
repeated string serviceAccountNames = 2;
}
// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.
message PodSecurityPolicyReviewStatus {
// allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.
repeated ServiceAccountPodSecurityPolicyReviewStatus allowedServiceAccounts = 1;
}
// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message PodSecurityPolicySelfSubjectReview {
// spec defines specification the PodSecurityPolicySelfSubjectReview.
optional PodSecurityPolicySelfSubjectReviewSpec spec = 1;
// status represents the current information/status for the PodSecurityPolicySelfSubjectReview.
optional PodSecurityPolicySubjectReviewStatus status = 2;
}
// PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.
message PodSecurityPolicySelfSubjectReviewSpec {
// template is the PodTemplateSpec to check.
optional k8s.io.api.core.v1.PodTemplateSpec template = 1;
}
// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message PodSecurityPolicySubjectReview {
// spec defines specification for the PodSecurityPolicySubjectReview.
optional PodSecurityPolicySubjectReviewSpec spec = 1;
// status represents the current information/status for the PodSecurityPolicySubjectReview.
optional PodSecurityPolicySubjectReviewStatus status = 2;
}
// PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview
message PodSecurityPolicySubjectReviewSpec {
// template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted.
// If its non-empty, it will be checked.
optional k8s.io.api.core.v1.PodTemplateSpec template = 1;
// user is the user you're testing for.
// If you specify "user" but not "group", then is it interpreted as "What if user were not a member of any groups.
// If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.
optional string user = 2;
// groups is the groups you're testing for.
repeated string groups = 3;
}
// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.
message PodSecurityPolicySubjectReviewStatus {
// allowedBy is a reference to the rule that allows the PodTemplateSpec.
// A rule can be a SecurityContextConstraint or a PodSecurityPolicy
// A `nil`, indicates that it was denied.
optional k8s.io.api.core.v1.ObjectReference allowedBy = 1;
// A machine-readable description of why this operation is in the
// "Failure" status. If this value is empty there
// is no information available.
optional string reason = 2;
// template is the PodTemplateSpec after the defaulting is applied.
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
}
// RangeAllocation is used so we can easily expose a RangeAllocation typed for security group
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message RangeAllocation {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000".
optional string range = 2;
// data is a byte array representing the serialized state of a range allocation. It is a bitmap
// with each bit set to one to represent a range is taken.
optional bytes data = 3;
}
// RangeAllocationList is a list of RangeAllocations objects
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message RangeAllocationList {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of RangeAllocations.
repeated RangeAllocation items = 2;
}
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
message RunAsUserStrategyOptions {
// Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.
optional string type = 1;
// UID is the user id that containers must run as. Required for the MustRunAs strategy if not using
// namespace/service account allocated uids.
optional int64 uid = 2;
// UIDRangeMin defines the min value for a strategy that allocates by range.
optional int64 uidRangeMin = 3;
// UIDRangeMax defines the max value for a strategy that allocates by range.
optional int64 uidRangeMax = 4;
}
// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.
message SELinuxContextStrategyOptions {
// Type is the strategy that will dictate what SELinux context is used in the SecurityContext.
optional string type = 1;
// seLinuxOptions required to run as; required for MustRunAs
optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
}
// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext
// that will be applied to a container.
// For historical reasons SCC was exposed under the core Kubernetes API group.
// That exposure is deprecated and will be removed in a future release - users
// should instead use the security.openshift.io group to manage
// SecurityContextConstraints.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged"
// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container"
// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext"
// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext"
// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext"
// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext"
// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs"
// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system"
// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins"
// +kubebuilder:singular=securitycontextconstraint
// +openshift:compatibility-gen:level=1
message SecurityContextConstraints {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Priority influences the sort order of SCCs when evaluating which SCCs to try first for
// a given pod request based on access in the Users and Groups fields. The higher the int, the
// higher priority. An unset value is considered a 0 priority. If scores
// for multiple SCCs are equal they will be sorted from most restrictive to
// least restrictive. If both priorities and restrictions are equal the
// SCCs will be sorted by name.
// +nullable
optional int32 priority = 2;
// AllowPrivilegedContainer determines if a container can request to be run as privileged.
optional bool allowPrivilegedContainer = 3;
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capabiility in both
// DefaultAddCapabilities and RequiredDropCapabilities.
// +nullable
repeated string defaultAddCapabilities = 4;
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
// are required to be dropped and cannot be added.
// +nullable
repeated string requiredDropCapabilities = 5;
// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
// Capabilities in this field maybe added at the pod author's discretion.
// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
// To allow all capabilities you may use '*'.
// +nullable
repeated string allowedCapabilities = 6;
// AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin
// +k8s:conversion-gen=false
optional bool allowHostDirVolumePlugin = 7;
// Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names
// of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*".
// To allow no volumes, set to ["none"].
// +nullable
repeated string volumes = 8;
// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
// +nullable
repeated AllowedFlexVolume allowedFlexVolumes = 21;
// AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
optional bool allowHostNetwork = 9;
// AllowHostPorts determines if the policy allows host ports in the containers.
optional bool allowHostPorts = 10;
// AllowHostPID determines if the policy allows host pid in the containers.
optional bool allowHostPID = 11;
// AllowHostIPC determines if the policy allows host ipc in the containers.
optional bool allowHostIPC = 12;
// DefaultAllowPrivilegeEscalation controls the default setting for whether a
// process can gain more privileges than its parent process.
// +optional
// +nullable
optional bool defaultAllowPrivilegeEscalation = 22;
// AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. If unspecified, defaults to true.
// +optional
// +nullable
optional bool allowPrivilegeEscalation = 23;
// SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.
// +nullable
optional SELinuxContextStrategyOptions seLinuxContext = 13;
// RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.
// +nullable
optional RunAsUserStrategyOptions runAsUser = 14;
// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
// +nullable
optional SupplementalGroupsStrategyOptions supplementalGroups = 15;
// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
// +nullable
optional FSGroupStrategyOptions fsGroup = 16;
// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
// system. If the container specifically requests to run with a non-read only root file system
// the SCC should deny the pod.
// If set to false the container may run with a read only root file system if it wishes but it
// will not be forced to.
optional bool readOnlyRootFilesystem = 17;
// The users who have permissions to use this security context constraints
// +optional
// +nullable
repeated string users = 18;
// The groups that have permission to use this security context constraints
// +optional
// +nullable
repeated string groups = 19;
// SeccompProfiles lists the allowed profiles that may be set for the pod or
// container's seccomp annotations. An unset (nil) or empty value means that no profiles may
// be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When
// used to generate a value for a pod the first non-wildcard profile will be used as
// the default.
// +nullable
repeated string seccompProfiles = 20;
// AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
//
// Examples:
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
// +optional
// +nullable
repeated string allowedUnsafeSysctls = 24;
// ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
//
// Examples:
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
// +optional
// +nullable
repeated string forbiddenSysctls = 25;
}
// SecurityContextConstraintsList is a list of SecurityContextConstraints objects
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message SecurityContextConstraintsList {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of security context constraints.
repeated SecurityContextConstraints items = 2;
}
// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status
message ServiceAccountPodSecurityPolicyReviewStatus {
optional PodSecurityPolicySubjectReviewStatus podSecurityPolicySubjectReviewStatus = 1;
// name contains the allowed and the denied ServiceAccount name
optional string name = 2;
}
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
message SupplementalGroupsStrategyOptions {
// Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.
optional string type = 1;
// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
// supplemental group then supply a single range with the same start and end.
repeated IDRange ranges = 2;
}
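
The Priority comment in the SecurityContextConstraints message above defines an ordering: higher integers win, an unset value counts as zero, and the SCC name is the final tie-break. A small sketch of that ordering over the vendored Go types follows; it deliberately omits the intermediate most-restrictive-first comparison, and sortSCCs is a made-up name, not an API from this package:

package main

import (
	"fmt"
	"sort"

	secv1 "github.com/openshift/api/security/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// priority treats an unset Priority as 0, as the field comment describes.
func priority(scc *secv1.SecurityContextConstraints) int32 {
	if scc.Priority == nil {
		return 0
	}
	return *scc.Priority
}

// sortSCCs orders highest priority first and breaks ties by name. The real
// admission logic also compares a restrictiveness score between those two
// steps, which this sketch omits.
func sortSCCs(sccs []*secv1.SecurityContextConstraints) {
	sort.SliceStable(sccs, func(i, j int) bool {
		pi, pj := priority(sccs[i]), priority(sccs[j])
		if pi != pj {
			return pi > pj
		}
		return sccs[i].Name < sccs[j].Name
	})
}

func main() {
	ten := int32(10)
	sccs := []*secv1.SecurityContextConstraints{
		{ObjectMeta: metav1.ObjectMeta{Name: "restricted"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "ceph-csi"}, Priority: &ten},
	}
	sortSCCs(sccs)
	fmt.Println(sccs[0].Name) // ceph-csi
}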

25 vendor/github.com/openshift/api/security/v1/legacy.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&SecurityContextConstraints{},
&SecurityContextConstraintsList{},
&PodSecurityPolicySubjectReview{},
&PodSecurityPolicySelfSubjectReview{},
&PodSecurityPolicyReview{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}

View File

@@ -0,0 +1,44 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "security.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&SecurityContextConstraints{},
&SecurityContextConstraintsList{},
&PodSecurityPolicySubjectReview{},
&PodSecurityPolicySelfSubjectReview{},
&PodSecurityPolicyReview{},
&RangeAllocation{},
&RangeAllocationList{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}
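
register.go is what lets a runtime.Scheme recognise these types under security.openshift.io/v1. The sketch below is not how this commit produces deploy/scc.yaml; it only illustrates what Install enables: registering the group and encoding a name-only SCC through a versioning YAML encoder so that apiVersion and kind are stamped from the scheme:

package main

import (
	"fmt"

	secv1 "github.com/openshift/api/security/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()
	if err := secv1.Install(scheme); err != nil {
		panic(err)
	}

	codecs := serializer.NewCodecFactory(scheme)
	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeYAML)
	if !ok {
		panic("yaml serializer not registered")
	}
	// The versioning encoder sets apiVersion and kind for the registered
	// security.openshift.io/v1 group before writing YAML.
	encoder := codecs.EncoderForVersion(info.Serializer, secv1.GroupVersion)

	scc := &secv1.SecurityContextConstraints{}
	scc.Name = "ceph-csi"

	out, err := runtime.Encode(encoder, scc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}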

456 vendor/github.com/openshift/api/security/v1/types.go generated vendored Normal file
View File

@@ -0,0 +1,456 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// AllowAllCapabilities can be used as a value for the
// SecurityContextConstraints.AllowAllCapabilities field and means that any
// capabilities are allowed to be requested.
var AllowAllCapabilities corev1.Capability = "*"
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext
// that will be applied to a container.
// For historical reasons SCC was exposed under the core Kubernetes API group.
// That exposure is deprecated and will be removed in a future release - users
// should instead use the security.openshift.io group to manage
// SecurityContextConstraints.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged"
// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container"
// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext"
// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext"
// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext"
// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext"
// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs"
// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system"
// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins"
// +kubebuilder:singular=securitycontextconstraint
// +openshift:compatibility-gen:level=1
type SecurityContextConstraints struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Priority influences the sort order of SCCs when evaluating which SCCs to try first for
// a given pod request based on access in the Users and Groups fields. The higher the int, the
// higher priority. An unset value is considered a 0 priority. If scores
// for multiple SCCs are equal they will be sorted from most restrictive to
// least restrictive. If both priorities and restrictions are equal the
// SCCs will be sorted by name.
// +nullable
Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"`
// AllowPrivilegedContainer determines if a container can request to be run as privileged.
AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"`
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capabiility in both
// DefaultAddCapabilities and RequiredDropCapabilities.
// +nullable
DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"`
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
// are required to be dropped and cannot be added.
// +nullable
RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"`
// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
// Capabilities in this field maybe added at the pod author's discretion.
// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
// To allow all capabilities you may use '*'.
// +nullable
AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"`
// AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin
// +k8s:conversion-gen=false
AllowHostDirVolumePlugin bool `json:"allowHostDirVolumePlugin" protobuf:"varint,7,opt,name=allowHostDirVolumePlugin"`
// Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names
// of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*".
// To allow no volumes, set to ["none"].
// +nullable
Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"`
// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
// +nullable
AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"`
// AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"`
// AllowHostPorts determines if the policy allows host ports in the containers.
AllowHostPorts bool `json:"allowHostPorts" protobuf:"varint,10,opt,name=allowHostPorts"`
// AllowHostPID determines if the policy allows host pid in the containers.
AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"`
// AllowHostIPC determines if the policy allows host ipc in the containers.
AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"`
// DefaultAllowPrivilegeEscalation controls the default setting for whether a
// process can gain more privileges than its parent process.
// +optional
// +nullable
DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"`
// AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. If unspecified, defaults to true.
// +optional
// +nullable
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"`
// SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.
// +nullable
SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"`
// RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.
// +nullable
RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"`
// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
// +nullable
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"`
// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
// +nullable
FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"`
// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
// system. If the container specifically requests to run with a non-read only root file system
// the SCC should deny the pod.
// If set to false the container may run with a read only root file system if it wishes but it
// will not be forced to.
ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem" protobuf:"varint,17,opt,name=readOnlyRootFilesystem"`
// The users who have permissions to use this security context constraints
// +optional
// +nullable
Users []string `json:"users" protobuf:"bytes,18,rep,name=users"`
// The groups that have permission to use this security context constraints
// +optional
// +nullable
Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"`
// SeccompProfiles lists the allowed profiles that may be set for the pod or
// container's seccomp annotations. An unset (nil) or empty value means that no profiles may
// be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When
// used to generate a value for a pod the first non-wildcard profile will be used as
// the default.
// +nullable
SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"`
// AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
//
// Examples:
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
// +optional
// +nullable
AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"`
// ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
//
// Examples:
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
// +optional
// +nullable
ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"`
}
// FS Type gives strong typing to different file systems that are used by volumes.
type FSType string
var (
FSTypeAzureFile FSType = "azureFile"
FSTypeAzureDisk FSType = "azureDisk"
FSTypeFlocker FSType = "flocker"
FSTypeFlexVolume FSType = "flexVolume"
FSTypeHostPath FSType = "hostPath"
FSTypeEmptyDir FSType = "emptyDir"
FSTypeGCEPersistentDisk FSType = "gcePersistentDisk"
FSTypeAWSElasticBlockStore FSType = "awsElasticBlockStore"
FSTypeGitRepo FSType = "gitRepo"
FSTypeSecret FSType = "secret"
FSTypeNFS FSType = "nfs"
FSTypeISCSI FSType = "iscsi"
FSTypeGlusterfs FSType = "glusterfs"
FSTypePersistentVolumeClaim FSType = "persistentVolumeClaim"
FSTypeRBD FSType = "rbd"
FSTypeCinder FSType = "cinder"
FSTypeCephFS FSType = "cephFS"
FSTypeDownwardAPI FSType = "downwardAPI"
FSTypeFC FSType = "fc"
FSTypeConfigMap FSType = "configMap"
FSTypeVsphereVolume FSType = "vsphere"
FSTypeQuobyte FSType = "quobyte"
FSTypePhotonPersistentDisk FSType = "photonPersistentDisk"
FSProjected FSType = "projected"
FSPortworxVolume FSType = "portworxVolume"
FSScaleIO FSType = "scaleIO"
FSStorageOS FSType = "storageOS"
FSTypeCSI FSType = "csi"
FSTypeEphemeral FSType = "ephemeral"
FSTypeAll FSType = "*"
FSTypeNone FSType = "none"
)
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type AllowedFlexVolume struct {
// Driver is the name of the Flexvolume driver.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
}
// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.
type SELinuxContextStrategyOptions struct {
// Type is the strategy that will dictate what SELinux context is used in the SecurityContext.
Type SELinuxContextStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SELinuxContextStrategyType"`
// seLinuxOptions required to run as; required for MustRunAs
SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
}
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsUserStrategyOptions struct {
// Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.
Type RunAsUserStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=RunAsUserStrategyType"`
// UID is the user id that containers must run as. Required for the MustRunAs strategy if not using
// namespace/service account allocated uids.
UID *int64 `json:"uid,omitempty" protobuf:"varint,2,opt,name=uid"`
// UIDRangeMin defines the min value for a strategy that allocates by range.
UIDRangeMin *int64 `json:"uidRangeMin,omitempty" protobuf:"varint,3,opt,name=uidRangeMin"`
// UIDRangeMax defines the max value for a strategy that allocates by range.
UIDRangeMax *int64 `json:"uidRangeMax,omitempty" protobuf:"varint,4,opt,name=uidRangeMax"`
}
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
type FSGroupStrategyOptions struct {
// Type is the strategy that will dictate what FSGroup is used in the SecurityContext.
Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"`
// Ranges are the allowed ranges of fs groups. If you would like to force a single
// fs group then supply a single range with the same start and end.
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
type SupplementalGroupsStrategyOptions struct {
// Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.
Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"`
// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
// supplemental group then supply a single range with the same start and end.
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}
// IDRange provides a min/max of an allowed range of IDs.
// TODO: this could be reused for UIDs.
type IDRange struct {
// Min is the start of the range, inclusive.
Min int64 `json:"min,omitempty" protobuf:"varint,1,opt,name=min"`
// Max is the end of the range, inclusive.
Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"`
}
// SELinuxContextStrategyType denotes strategy types for generating SELinux options for a
// SecurityContext
type SELinuxContextStrategyType string
// RunAsUserStrategyType denotes strategy types for generating RunAsUser values for a
// SecurityContext
type RunAsUserStrategyType string
// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
// groups for a SecurityContext.
type SupplementalGroupsStrategyType string
// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
// SecurityContext
type FSGroupStrategyType string
const (
// container must have SELinux labels of X applied.
SELinuxStrategyMustRunAs SELinuxContextStrategyType = "MustRunAs"
// container may make requests for any SELinux context labels.
SELinuxStrategyRunAsAny SELinuxContextStrategyType = "RunAsAny"
// container must run as a particular uid.
RunAsUserStrategyMustRunAs RunAsUserStrategyType = "MustRunAs"
// container must run as a particular uid.
RunAsUserStrategyMustRunAsRange RunAsUserStrategyType = "MustRunAsRange"
// container must run as a non-root uid
RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategyType = "MustRunAsNonRoot"
// container may make requests for any uid.
RunAsUserStrategyRunAsAny RunAsUserStrategyType = "RunAsAny"
// container must have FSGroup of X applied.
FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
// container may make requests for any FSGroup labels.
FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
// container must run as a particular gid.
SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
// container may make requests for any gid.
SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecurityContextConstraintsList is a list of SecurityContextConstraints objects
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type SecurityContextConstraintsList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of security context constraints.
Items []SecurityContextConstraints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type PodSecurityPolicySubjectReview struct {
metav1.TypeMeta `json:",inline"`
// spec defines specification for the PodSecurityPolicySubjectReview.
Spec PodSecurityPolicySubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
// status represents the current information/status for the PodSecurityPolicySubjectReview.
Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview
type PodSecurityPolicySubjectReviewSpec struct {
// template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted.
// If its non-empty, it will be checked.
Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`
// user is the user you're testing for.
// If you specify "user" but not "group", then is it interpreted as "What if user were not a member of any groups.
// If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.
User string `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
// groups is the groups you're testing for.
Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
}
// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.
type PodSecurityPolicySubjectReviewStatus struct {
// allowedBy is a reference to the rule that allows the PodTemplateSpec.
// A rule can be a SecurityContextConstraint or a PodSecurityPolicy
// A `nil`, indicates that it was denied.
AllowedBy *corev1.ObjectReference `json:"allowedBy,omitempty" protobuf:"bytes,1,opt,name=allowedBy"`
// A machine-readable description of why this operation is in the
// "Failure" status. If this value is empty there
// is no information available.
Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
// template is the PodTemplateSpec after the defaulting is applied.
Template corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type PodSecurityPolicySelfSubjectReview struct {
metav1.TypeMeta `json:",inline"`
// spec defines specification the PodSecurityPolicySelfSubjectReview.
Spec PodSecurityPolicySelfSubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
// status represents the current information/status for the PodSecurityPolicySelfSubjectReview.
Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.
type PodSecurityPolicySelfSubjectReviewSpec struct {
// template is the PodTemplateSpec to check.
Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`
}
// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type PodSecurityPolicyReview struct {
metav1.TypeMeta `json:",inline"`
// spec is the PodSecurityPolicy to check.
Spec PodSecurityPolicyReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
// status represents the current information/status for the PodSecurityPolicyReview.
Status PodSecurityPolicyReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview
type PodSecurityPolicyReviewSpec struct {
// template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used
// if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty,
// in which case "default" is used.
// If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`
// serviceAccountNames is an optional set of ServiceAccounts to run the check with.
// If serviceAccountNames is empty, the template.spec.serviceAccountName is used,
// unless it's empty, in which case "default" is used instead.
// If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
ServiceAccountNames []string `json:"serviceAccountNames,omitempty" protobuf:"bytes,2,rep,name=serviceAccountNames"` // TODO: find a way to express 'all service accounts'
}
// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.
type PodSecurityPolicyReviewStatus struct {
// allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.
AllowedServiceAccounts []ServiceAccountPodSecurityPolicyReviewStatus `json:"allowedServiceAccounts" protobuf:"bytes,1,rep,name=allowedServiceAccounts"`
}
// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status
type ServiceAccountPodSecurityPolicyReviewStatus struct {
PodSecurityPolicySubjectReviewStatus `json:",inline" protobuf:"bytes,1,opt,name=podSecurityPolicySubjectReviewStatus"`
// name contains the name of the ServiceAccount that was reviewed, whether it was allowed or denied
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is used so we can easily expose a RangeAllocation typed for security groups
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RangeAllocation struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000".
Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
// data is a byte array representing the serialized state of a range allocation. It is a bitmap
// with each bit set to one to represent that a range is taken.
Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocationList is a list of RangeAllocation objects
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RangeAllocationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of RangeAllocations.
Items []RangeAllocation `json:"items" protobuf:"bytes,2,rep,name=items"`
}

View File

@ -0,0 +1,532 @@
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
if in == nil {
return nil
}
out := new(AllowedFlexVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
if in == nil {
return nil
}
out := new(FSGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IDRange) DeepCopyInto(out *IDRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
func (in *IDRange) DeepCopy() *IDRange {
if in == nil {
return nil
}
out := new(IDRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyReview) DeepCopyInto(out *PodSecurityPolicyReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReview.
func (in *PodSecurityPolicyReview) DeepCopy() *PodSecurityPolicyReview {
if in == nil {
return nil
}
out := new(PodSecurityPolicyReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicyReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyReviewSpec) DeepCopyInto(out *PodSecurityPolicyReviewSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
if in.ServiceAccountNames != nil {
in, out := &in.ServiceAccountNames, &out.ServiceAccountNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewSpec.
func (in *PodSecurityPolicyReviewSpec) DeepCopy() *PodSecurityPolicyReviewSpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicyReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyReviewStatus) DeepCopyInto(out *PodSecurityPolicyReviewStatus) {
*out = *in
if in.AllowedServiceAccounts != nil {
in, out := &in.AllowedServiceAccounts, &out.AllowedServiceAccounts
*out = make([]ServiceAccountPodSecurityPolicyReviewStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewStatus.
func (in *PodSecurityPolicyReviewStatus) DeepCopy() *PodSecurityPolicyReviewStatus {
if in == nil {
return nil
}
out := new(PodSecurityPolicyReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySelfSubjectReview) DeepCopyInto(out *PodSecurityPolicySelfSubjectReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReview.
func (in *PodSecurityPolicySelfSubjectReview) DeepCopy() *PodSecurityPolicySelfSubjectReview {
if in == nil {
return nil
}
out := new(PodSecurityPolicySelfSubjectReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicySelfSubjectReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySelfSubjectReviewSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReviewSpec.
func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopy() *PodSecurityPolicySelfSubjectReviewSpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicySelfSubjectReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySubjectReview) DeepCopyInto(out *PodSecurityPolicySubjectReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReview.
func (in *PodSecurityPolicySubjectReview) DeepCopy() *PodSecurityPolicySubjectReview {
if in == nil {
return nil
}
out := new(PodSecurityPolicySubjectReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicySubjectReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySubjectReviewSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewSpec.
func (in *PodSecurityPolicySubjectReviewSpec) DeepCopy() *PodSecurityPolicySubjectReviewSpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicySubjectReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySubjectReviewStatus) DeepCopyInto(out *PodSecurityPolicySubjectReviewStatus) {
*out = *in
if in.AllowedBy != nil {
in, out := &in.AllowedBy, &out.AllowedBy
*out = new(corev1.ObjectReference)
**out = **in
}
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewStatus.
func (in *PodSecurityPolicySubjectReviewStatus) DeepCopy() *PodSecurityPolicySubjectReviewStatus {
if in == nil {
return nil
}
out := new(PodSecurityPolicySubjectReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
func (in *RangeAllocation) DeepCopy() *RangeAllocation {
if in == nil {
return nil
}
out := new(RangeAllocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RangeAllocation) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RangeAllocationList) DeepCopyInto(out *RangeAllocationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RangeAllocation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocationList.
func (in *RangeAllocationList) DeepCopy() *RangeAllocationList {
if in == nil {
return nil
}
out := new(RangeAllocationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RangeAllocationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
*out = *in
if in.UID != nil {
in, out := &in.UID, &out.UID
*out = new(int64)
**out = **in
}
if in.UIDRangeMin != nil {
in, out := &in.UIDRangeMin, &out.UIDRangeMin
*out = new(int64)
**out = **in
}
if in.UIDRangeMax != nil {
in, out := &in.UIDRangeMax, &out.UIDRangeMax
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsUserStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxContextStrategyOptions) DeepCopyInto(out *SELinuxContextStrategyOptions) {
*out = *in
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
*out = new(corev1.SELinuxOptions)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxContextStrategyOptions.
func (in *SELinuxContextStrategyOptions) DeepCopy() *SELinuxContextStrategyOptions {
if in == nil {
return nil
}
out := new(SELinuxContextStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecurityContextConstraints) DeepCopyInto(out *SecurityContextConstraints) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Priority != nil {
in, out := &in.Priority, &out.Priority
*out = new(int32)
**out = **in
}
if in.DefaultAddCapabilities != nil {
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
*out = make([]corev1.Capability, len(*in))
copy(*out, *in)
}
if in.RequiredDropCapabilities != nil {
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
*out = make([]corev1.Capability, len(*in))
copy(*out, *in)
}
if in.AllowedCapabilities != nil {
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
*out = make([]corev1.Capability, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]FSType, len(*in))
copy(*out, *in)
}
if in.AllowedFlexVolumes != nil {
in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
*out = make([]AllowedFlexVolume, len(*in))
copy(*out, *in)
}
if in.DefaultAllowPrivilegeEscalation != nil {
in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
*out = new(bool)
**out = **in
}
if in.AllowPrivilegeEscalation != nil {
in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
*out = new(bool)
**out = **in
}
in.SELinuxContext.DeepCopyInto(&out.SELinuxContext)
in.RunAsUser.DeepCopyInto(&out.RunAsUser)
in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
in.FSGroup.DeepCopyInto(&out.FSGroup)
if in.Users != nil {
in, out := &in.Users, &out.Users
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SeccompProfiles != nil {
in, out := &in.SeccompProfiles, &out.SeccompProfiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowedUnsafeSysctls != nil {
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ForbiddenSysctls != nil {
in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraints.
func (in *SecurityContextConstraints) DeepCopy() *SecurityContextConstraints {
if in == nil {
return nil
}
out := new(SecurityContextConstraints)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SecurityContextConstraints) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecurityContextConstraintsList) DeepCopyInto(out *SecurityContextConstraintsList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]SecurityContextConstraints, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraintsList.
func (in *SecurityContextConstraintsList) DeepCopy() *SecurityContextConstraintsList {
if in == nil {
return nil
}
out := new(SecurityContextConstraintsList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SecurityContextConstraintsList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopyInto(out *ServiceAccountPodSecurityPolicyReviewStatus) {
*out = *in
in.PodSecurityPolicySubjectReviewStatus.DeepCopyInto(&out.PodSecurityPolicySubjectReviewStatus)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountPodSecurityPolicyReviewStatus.
func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopy() *ServiceAccountPodSecurityPolicyReviewStatus {
if in == nil {
return nil
}
out := new(ServiceAccountPodSecurityPolicyReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
if in == nil {
return nil
}
out := new(SupplementalGroupsStrategyOptions)
in.DeepCopyInto(out)
return out
}

View File

@ -0,0 +1,224 @@
package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_AllowedFlexVolume = map[string]string{
"": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
"driver": "Driver is the name of the Flexvolume driver.",
}
func (AllowedFlexVolume) SwaggerDoc() map[string]string {
return map_AllowedFlexVolume
}
var map_FSGroupStrategyOptions = map[string]string{
"": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.",
"type": "Type is the strategy that will dictate what FSGroup is used in the SecurityContext.",
"ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.",
}
func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
return map_FSGroupStrategyOptions
}
var map_IDRange = map[string]string{
"": "IDRange provides a min/max of an allowed range of IDs.",
"min": "Min is the start of the range, inclusive.",
"max": "Max is the end of the range, inclusive.",
}
func (IDRange) SwaggerDoc() map[string]string {
return map_IDRange
}
var map_PodSecurityPolicyReview = map[string]string{
"": "PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"spec": "spec is the PodSecurityPolicy to check.",
"status": "status represents the current information/status for the PodSecurityPolicyReview.",
}
func (PodSecurityPolicyReview) SwaggerDoc() map[string]string {
return map_PodSecurityPolicyReview
}
var map_PodSecurityPolicyReviewSpec = map[string]string{
"": "PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview",
"template": "template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, in which case \"default\" is used. If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.",
"serviceAccountNames": "serviceAccountNames is an optional set of ServiceAccounts to run the check with. If serviceAccountNames is empty, the template.spec.serviceAccountName is used, unless it's empty, in which case \"default\" is used instead. If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.",
}
func (PodSecurityPolicyReviewSpec) SwaggerDoc() map[string]string {
return map_PodSecurityPolicyReviewSpec
}
var map_PodSecurityPolicyReviewStatus = map[string]string{
"": "PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.",
"allowedServiceAccounts": "allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.",
}
func (PodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string {
return map_PodSecurityPolicyReviewStatus
}
var map_PodSecurityPolicySelfSubjectReview = map[string]string{
"": "PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"spec": "spec defines specification the PodSecurityPolicySelfSubjectReview.",
"status": "status represents the current information/status for the PodSecurityPolicySelfSubjectReview.",
}
func (PodSecurityPolicySelfSubjectReview) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySelfSubjectReview
}
var map_PodSecurityPolicySelfSubjectReviewSpec = map[string]string{
"": "PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.",
"template": "template is the PodTemplateSpec to check.",
}
func (PodSecurityPolicySelfSubjectReviewSpec) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySelfSubjectReviewSpec
}
var map_PodSecurityPolicySubjectReview = map[string]string{
"": "PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"spec": "spec defines specification for the PodSecurityPolicySubjectReview.",
"status": "status represents the current information/status for the PodSecurityPolicySubjectReview.",
}
func (PodSecurityPolicySubjectReview) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySubjectReview
}
var map_PodSecurityPolicySubjectReviewSpec = map[string]string{
"": "PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview",
"template": "template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. If its non-empty, it will be checked.",
"user": "user is the user you're testing for. If you specify \"user\" but not \"group\", then is it interpreted as \"What if user were not a member of any groups. If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.",
"groups": "groups is the groups you're testing for.",
}
func (PodSecurityPolicySubjectReviewSpec) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySubjectReviewSpec
}
var map_PodSecurityPolicySubjectReviewStatus = map[string]string{
"": "PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.",
"allowedBy": "allowedBy is a reference to the rule that allows the PodTemplateSpec. A rule can be a SecurityContextConstraint or a PodSecurityPolicy A `nil`, indicates that it was denied.",
"reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available.",
"template": "template is the PodTemplateSpec after the defaulting is applied.",
}
func (PodSecurityPolicySubjectReviewStatus) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySubjectReviewStatus
}
var map_RangeAllocation = map[string]string{
"": "RangeAllocation is used so we can easily expose a RangeAllocation typed for security group\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"range": "range is a string representing a unique label for a range of uids, \"1000000000-2000000000/10000\".",
"data": "data is a byte array representing the serialized state of a range allocation. It is a bitmap with each bit set to one to represent a range is taken.",
}
func (RangeAllocation) SwaggerDoc() map[string]string {
return map_RangeAllocation
}
var map_RangeAllocationList = map[string]string{
"": "RangeAllocationList is a list of RangeAllocations objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"items": "List of RangeAllocations.",
}
func (RangeAllocationList) SwaggerDoc() map[string]string {
return map_RangeAllocationList
}
var map_RunAsUserStrategyOptions = map[string]string{
"": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.",
"type": "Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.",
"uid": "UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.",
"uidRangeMin": "UIDRangeMin defines the min value for a strategy that allocates by range.",
"uidRangeMax": "UIDRangeMax defines the max value for a strategy that allocates by range.",
}
func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
return map_RunAsUserStrategyOptions
}
var map_SELinuxContextStrategyOptions = map[string]string{
"": "SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.",
"type": "Type is the strategy that will dictate what SELinux context is used in the SecurityContext.",
"seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs",
}
func (SELinuxContextStrategyOptions) SwaggerDoc() map[string]string {
return map_SELinuxContextStrategyOptions
}
var map_SecurityContextConstraints = map[string]string{
"": "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"priority": "Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.",
"allowPrivilegedContainer": "AllowPrivilegedContainer determines if a container can request to be run as privileged.",
"defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.",
"requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.",
"allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.",
"allowHostDirVolumePlugin": "AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin",
"volumes": "Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].",
"allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.",
"allowHostNetwork": "AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.",
"allowHostPorts": "AllowHostPorts determines if the policy allows host ports in the containers.",
"allowHostPID": "AllowHostPID determines if the policy allows host pid in the containers.",
"allowHostIPC": "AllowHostIPC determines if the policy allows host ipc in the containers.",
"defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
"allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
"seLinuxContext": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.",
"runAsUser": "RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.",
"supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
"fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
"readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
"users": "The users who have permissions to use this security context constraints",
"groups": "The groups that have permission to use this security context constraints",
"seccompProfiles": "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.",
"allowedUnsafeSysctls": "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.",
"forbiddenSysctls": "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.",
}
func (SecurityContextConstraints) SwaggerDoc() map[string]string {
return map_SecurityContextConstraints
}
var map_SecurityContextConstraintsList = map[string]string{
"": "SecurityContextConstraintsList is a list of SecurityContextConstraints objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"items": "List of security context constraints.",
}
func (SecurityContextConstraintsList) SwaggerDoc() map[string]string {
return map_SecurityContextConstraintsList
}
var map_ServiceAccountPodSecurityPolicyReviewStatus = map[string]string{
"": "ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status",
"name": "name contains the allowed and the denied ServiceAccount name",
}
func (ServiceAccountPodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string {
return map_ServiceAccountPodSecurityPolicyReviewStatus
}
var map_SupplementalGroupsStrategyOptions = map[string]string{
"": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.",
"type": "Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
"ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.",
}
func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string {
return map_SupplementalGroupsStrategyOptions
}
// AUTO-GENERATED FUNCTIONS END HERE
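The generated SwaggerDoc maps can be read back at runtime to look up field documentation by its JSON name. A minimal, hypothetical usage sketch:

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
)

func main() {
	// SwaggerDoc returns the JSON-field-name -> description map generated above.
	docs := securityv1.SecurityContextConstraints{}.SwaggerDoc()
	fmt.Println(docs["allowHostNetwork"])
	fmt.Println(docs["readOnlyRootFilesystem"])
}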

8
vendor/modules.txt vendored
View File

@ -49,6 +49,9 @@ github.com/beorn7/perks/quantile
github.com/blang/semver
# github.com/cenkalti/backoff/v3 v3.0.0
github.com/cenkalti/backoff/v3
# github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 => ./api
## explicit
github.com/ceph/ceph-csi/api/deploy/ocp
# github.com/ceph/go-ceph v0.11.0
## explicit
github.com/ceph/go-ceph/cephfs/admin
@ -88,6 +91,8 @@ github.com/fatih/color
github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.4.9
github.com/fsnotify/fsnotify
# github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/ghodss/yaml
# github.com/go-logr/logr v0.4.0
github.com/go-logr/logr
# github.com/gogo/protobuf v1.3.2
@ -258,6 +263,8 @@ github.com/opencontainers/go-digest
# github.com/opencontainers/runc v1.0.2
github.com/opencontainers/runc/libcontainer/apparmor
github.com/opencontainers/runc/libcontainer/utils
# github.com/openshift/api v0.0.0-20210927171657-636513e97fda
github.com/openshift/api/security/v1
# github.com/pborman/uuid v1.2.1
## explicit
github.com/pborman/uuid
@ -1057,6 +1064,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml
# code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f
# github.com/ceph/ceph-csi/api => ./api
# github.com/golang/protobuf => github.com/golang/protobuf v1.4.3
# github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b
# github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
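With github.com/openshift/api/security/v1 and sigs.k8s.io/yaml both vendored, an SCC manifest can be produced directly from the typed object. The following is only an illustrative sketch, not the actual yamlgen implementation, which generates deploy/scc.yaml from the artifact shipped in api/deploy/ocp; the field values shown are a small subset chosen for brevity.

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	scc := securityv1.SecurityContextConstraints{
		TypeMeta: metav1.TypeMeta{
			Kind:       "SecurityContextConstraints",
			APIVersion: "security.openshift.io/v1",
		},
		ObjectMeta:               metav1.ObjectMeta{Name: "ceph-csi"},
		AllowPrivilegedContainer: true,
		AllowHostNetwork:         true,
		AllowHostDirVolumePlugin: true,
	}

	// sigs.k8s.io/yaml marshals through the JSON tags, so the output uses the
	// same field names that appear in deploy/scc.yaml.
	out, err := yaml.Marshal(scc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}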