mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

rebase: update kubernetes dep to 1.24.0

As kubernetes 1.24.0 is released, update the kubernetes dependencies to 1.24.0.

updates: #3086

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

committed by mergify[bot]
parent fc1529f268
commit c4f79d455f
22 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/README.md (generated, vendored, Normal file)
@@ -0,0 +1,22 @@
# test/e2e/testing-manifests

## Embedded Test Data

If you need to use a test fixture inside your tests and it is defined inside this directory, it needs to be added to the `//go:embed` directive in `embed.go`.

For example, if you want to include this Readme as a test fixture (potentially a bad idea in reality!):

```
// embed.go

...
//go:embed some other files README.md
...
```

This fixture can be accessed in the e2e tests using `test/e2e/framework/testfiles.Read`, like
`testfiles.Read("test/e2e/testing-manifests/README.md")`.

This is needed since [migrating to //go:embed from go-bindata][1].

[1]: https://github.com/kubernetes/kubernetes/pull/99829
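To illustrate the README above, here is a minimal Go sketch of reading one of these embedded fixtures through the vendored framework. It assumes the `testfiles.AddFileSource` registration step that is normally done during e2e suite setup; the package name `example` and helper `readFixture` are hypothetical, while `GetE2ETestingManifestsFS` is the function defined in `embed.go` in this diff.

```go
// Minimal sketch (hypothetical helper), not part of this commit.
package example

import (
	"k8s.io/kubernetes/test/e2e/framework/testfiles"

	e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests"
)

func readFixture() ([]byte, error) {
	// Register the embedded manifests FS with the framework;
	// GetE2ETestingManifestsFS is defined in embed.go below.
	testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS())
	// Paths are resolved relative to the source tree root.
	return testfiles.Read("test/e2e/testing-manifests/README.md")
}
```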
21 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-backend-rc.yaml (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: dns-backend
  labels:
    name: dns-backend
spec:
  replicas: 1
  selector:
    name: dns-backend
  template:
    metadata:
      labels:
        name: dns-backend
    spec:
      containers:
      - name: dns-backend
        image: k8s.gcr.io/example-dns-backend:v1
        ports:
        - name: backend-port
          containerPort: 8000
9 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-backend-service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,9 @@
kind: Service
apiVersion: v1
metadata:
  name: dns-backend
spec:
  ports:
  - port: 8000
  selector:
    name: dns-backend
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-frontend-pod.yaml (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: dns-frontend
  labels:
    name: dns-frontend
spec:
  containers:
  - name: dns-frontend
    image: k8s.gcr.io/example-dns-frontend:v1
    command:
    - python
    - client.py
    - http://dns-backend.development.svc.cluster.local:8000
    imagePullPolicy: Always
  restartPolicy: Never
33 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/embed.go (generated, vendored, Normal file)
@@ -0,0 +1,33 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing_manifests

import (
	"embed"

	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

//go:embed cluster-dns flexvolume guestbook kubectl sample-device-plugin.yaml scheduling/nvidia-driver-installer.yaml statefulset storage-csi
var e2eTestingManifestsFS embed.FS

func GetE2ETestingManifestsFS() e2etestfiles.EmbeddedFileSource {
	return e2etestfiles.EmbeddedFileSource{
		EmbeddedFS: e2eTestingManifestsFS,
		Root:       "test/e2e/testing-manifests",
	}
}
145 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/flexvolume/attachable-with-long-mount (generated, vendored, Normal file)
@@ -0,0 +1,145 @@
#!/bin/sh

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This driver is especially designed to test a long mounting scenario
# which can cause a volume to be detached while mount is in progress.


FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}

VALID_MNTDEVICE=foo

# attach always returns one valid mount device so a different device
# showing up in a subsequent driver call implies a bug
validateMountDeviceOrDie() {
	MNTDEVICE=$1
	CALL=$2
	if [ "$MNTDEVICE" != "$VALID_MNTDEVICE" ]; then
		log "{\"status\":\"Failure\",\"message\":\"call "${CALL}" expected device "${VALID_MNTDEVICE}", got device "${MNTDEVICE}"\"}"
		exit 0
	fi
}

log() {
	printf "$*" >&1
}

debug() {
	echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}

attach() {
	debug "attach $@"
	log "{\"status\":\"Success\",\"device\":\""${VALID_MNTDEVICE}"\"}"
	exit 0
}

detach() {
	debug "detach $@"
	# TODO issue 44737 detach is passed PV name, not mount device
	log "{\"status\":\"Success\"}"
	exit 0
}

waitforattach() {
	debug "waitforattach $@"
	MNTDEVICE=$1
	validateMountDeviceOrDie "$MNTDEVICE" "waitforattach"
	log "{\"status\":\"Success\",\"device\":\""${MNTDEVICE}"\"}"
	exit 0
}

isattached() {
	debug "isattached $@"
	log "{\"status\":\"Success\",\"attached\":true}"
	exit 0
}

domountdevice() {
	debug "domountdevice $@"
	MNTDEVICE=$2
	validateMountDeviceOrDie "$MNTDEVICE" "domountdevice"
	MNTPATH=$1
	mkdir -p ${MNTPATH} >/dev/null 2>&1
	mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
	sleep 120
	echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
	log "{\"status\":\"Success\"}"
	exit 0
}

unmountdevice() {
	debug "unmountdevice $@"
	MNTPATH=$1
	rm "${MNTPATH}/index.html" >/dev/null 2>&1
	umount ${MNTPATH} >/dev/null 2>&1
	log "{\"status\":\"Success\"}"
	exit 0
}

expandvolume() {
	debug "expandvolume $@"
	log "{\"status\":\"Success\"}"
	exit 0
}

expandfs() {
	debug "expandfs $@"
	log "{\"status\":\"Success\"}"
	exit 0
}

op=$1

if [ "$op" = "init" ]; then
	debug "init $@"
	log "{\"status\":\"Success\",\"capabilities\":{\"attach\":true, \"requiresFSResize\":true}}"
	exit 0
fi

shift

case "$op" in
	attach)
		attach $*
		;;
	detach)
		detach $*
		;;
	waitforattach)
		waitforattach $*
		;;
	isattached)
		isattached $*
		;;
	mountdevice)
		domountdevice $*
		;;
	unmountdevice)
		unmountdevice $*
		;;
	expandvolume)
		expandvolume $*
		;;
	expandfs)
		expandfs $*
		;;
	*)
		log "{\"status\":\"Not supported\"}"
		exit 0
esac

exit 1
70 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/flexvolume/dummy (generated, vendored, Normal file)
@@ -0,0 +1,70 @@
#!/bin/sh

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This driver implements a tmpfs with a pre-populated file index.html.

FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}

log() {
	printf "$*" >&1
}

debug() {
	echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}

domount() {
	debug "domount $@"
	MNTPATH=$1
	mkdir -p ${MNTPATH} >/dev/null 2>&1
	mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
	echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
	log "{\"status\":\"Success\"}"
	exit 0
}

unmount() {
	debug "unmount $@"
	MNTPATH=$1
	rm ${MNTPATH}/index.html >/dev/null 2>&1
	umount ${MNTPATH} >/dev/null 2>&1
	log "{\"status\":\"Success\"}"
	exit 0
}

op=$1

if [ "$op" = "init" ]; then
	debug "init $@"
	log "{\"status\":\"Success\",\"capabilities\":{\"attach\":false}}"
	exit 0
fi

shift

case "$op" in
	mount)
		domount $*
		;;
	unmount)
		unmount $*
		;;
	*)
		log "{\"status\":\"Not supported\"}"
		exit 0
esac

exit 1
143 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/flexvolume/dummy-attachable (generated, vendored, Normal file)
@@ -0,0 +1,143 @@
#!/bin/sh

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This driver implements a tmpfs with a pre-populated file index.html.
# Attach is required, but it is a no-op that always returns success.

FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}

VALID_MNTDEVICE=foo

# attach always returns one valid mount device so a different device
# showing up in a subsequent driver call implies a bug
validateMountDeviceOrDie() {
	MNTDEVICE=$1
	CALL=$2
	if [ "$MNTDEVICE" != "$VALID_MNTDEVICE" ]; then
		log "{\"status\":\"Failure\",\"message\":\"call "${CALL}" expected device "${VALID_MNTDEVICE}", got device "${MNTDEVICE}"\"}"
		exit 0
	fi
}

log() {
	printf "$*" >&1
}

debug() {
	echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}

attach() {
	debug "attach $@"
	log "{\"status\":\"Success\",\"device\":\""${VALID_MNTDEVICE}"\"}"
	exit 0
}

detach() {
	debug "detach $@"
	# TODO issue 44737 detach is passed PV name, not mount device
	log "{\"status\":\"Success\"}"
	exit 0
}

waitforattach() {
	debug "waitforattach $@"
	MNTDEVICE=$1
	validateMountDeviceOrDie "$MNTDEVICE" "waitforattach"
	log "{\"status\":\"Success\",\"device\":\""${MNTDEVICE}"\"}"
	exit 0
}

isattached() {
	debug "isattached $@"
	log "{\"status\":\"Success\",\"attached\":true}"
	exit 0
}

domountdevice() {
	debug "domountdevice $@"
	MNTDEVICE=$2
	validateMountDeviceOrDie "$MNTDEVICE" "domountdevice"
	MNTPATH=$1
	mkdir -p ${MNTPATH} >/dev/null 2>&1
	mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
	echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
	log "{\"status\":\"Success\"}"
	exit 0
}

unmountdevice() {
	debug "unmountdevice $@"
	MNTPATH=$1
	rm "${MNTPATH}/index.html" >/dev/null 2>&1
	umount ${MNTPATH} >/dev/null 2>&1
	log "{\"status\":\"Success\"}"
	exit 0
}

expandvolume() {
	debug "expandvolume $@"
	log "{\"status\":\"Success\"}"
	exit 0
}

expandfs() {
	debug "expandfs $@"
	log "{\"status\":\"Success\"}"
	exit 0
}

op=$1

if [ "$op" = "init" ]; then
	debug "init $@"
	log "{\"status\":\"Success\",\"capabilities\":{\"attach\":true, \"requiresFSResize\":true}}"
	exit 0
fi

shift

case "$op" in
	attach)
		attach $*
		;;
	detach)
		detach $*
		;;
	waitforattach)
		waitforattach $*
		;;
	isattached)
		isattached $*
		;;
	mountdevice)
		domountdevice $*
		;;
	unmountdevice)
		unmountdevice $*
		;;
	expandvolume)
		expandvolume $*
		;;
	expandfs)
		expandfs $*
		;;
	*)
		log "{\"status\":\"Not supported\"}"
		exit 0
esac

exit 1
28 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/agnhost-primary-deployment.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agnhost-primary
spec:
  replicas: 1
  selector:
    matchLabels:
      app: agnhost
      role: primary
      tier: backend
  template:
    metadata:
      labels:
        app: agnhost
        role: primary
        tier: backend
    spec:
      containers:
      - name: primary
        image: {{.AgnhostImage}}
        args: [ "guestbook", "--http-port", "6379" ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/agnhost-primary-service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: agnhost-primary
  labels:
    app: agnhost
    role: primary
    tier: backend
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: agnhost
    role: primary
    tier: backend
28 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/agnhost-replica-deployment.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agnhost-replica
spec:
  replicas: 2
  selector:
    matchLabels:
      app: agnhost
      role: replica
      tier: backend
  template:
    metadata:
      labels:
        app: agnhost
        role: replica
        tier: backend
    spec:
      containers:
      - name: replica
        image: {{.AgnhostImage}}
        args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
15 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/agnhost-replica-service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: agnhost-replica
  labels:
    app: agnhost
    role: replica
    tier: backend
spec:
  ports:
  - port: 6379
  selector:
    app: agnhost
    role: replica
    tier: backend
26 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/frontend-deployment.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: guestbook
      tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: guestbook-frontend
        image: {{.AgnhostImage}}
        args: [ "guestbook", "--backend-port", "6379" ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 80
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/frontend-service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
  - port: 80
  selector:
    app: guestbook
    tier: frontend
29 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/frontend-controller.yaml (generated, vendored, Normal file)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below:
          # value: env
        ports:
        - containerPort: 80
26 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml (generated, vendored, Normal file)
@@ -0,0 +1,26 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
    tier: backend
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: docker.io/library/redis:5.0.5-alpine
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
37 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml (generated, vendored, Normal file)
@@ -0,0 +1,37 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-slave
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: docker.io/library/redis:5.0.5-alpine
        # We are only implementing the dns option of:
        # https://github.com/kubernetes/examples/blob/97c7ed0eb6555a4b667d2877f965d392e00abc45/guestbook/redis-slave/run.sh
        command: [ "redis-server", "--slaveof", "redis-master", "6379" ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below:
          # value: env
        ports:
        - containerPort: 6379
27 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/redis-master-deployment.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-master
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: {{.RedisImage}}
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/redis-master-service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
    tier: backend
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    role: master
    tier: backend
38 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/redis-slave-deployment.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-slave
spec:
  replicas: 2
  selector:
    matchLabels:
      app: redis
      role: slave
      tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: {{.RedisImage}}
        # We are only implementing the dns option of:
        # https://github.com/kubernetes/examples/blob/97c7ed0eb6555a4b667d2877f965d392e00abc45/guestbook/redis-slave/run.sh
        command: [ "redis-server", "--slaveof", "redis-master", "6379" ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below:
          # value: env
        ports:
        - containerPort: 6379
15 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/redis-slave-service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  ports:
  - port: 6379
  selector:
    app: redis
    role: slave
    tier: backend
40 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-controller.json.in (generated, vendored, Normal file)
@@ -0,0 +1,40 @@
{
   "kind":"ReplicationController",
   "apiVersion":"v1",
   "metadata":{
      "name":"agnhost-primary",
      "labels":{
         "app":"agnhost",
         "role":"primary"
      }
   },
   "spec":{
      "replicas":1,
      "selector":{
         "app":"agnhost",
         "role":"primary"
      },
      "template":{
         "metadata":{
            "labels":{
               "app":"agnhost",
               "role":"primary"
            }
         },
         "spec":{
            "containers":[
               {
                  "name":"agnhost-primary",
                  "image": "{{.AgnhostImage}}",
                  "ports":[
                     {
                        "name":"agnhost-server",
                        "containerPort":6379
                     }
                  ]
               }
            ]
         }
      }
   }
}
32 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml (generated, vendored, Normal file)
@@ -0,0 +1,32 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: agnhost
    role: primary
  name: agnhost-primary
spec:
  containers:
  - name: primary
    image: k8s.gcr.io/e2e-test-images/agnhost:2.32
    env:
    - name: PRIMARY
      value: "true"
    ports:
    - containerPort: 6379
    resources:
      limits:
        cpu: "0.1"
    volumeMounts:
    - mountPath: /agnhost-primary-data
      name: data
  - name: sentinel
    image: k8s.gcr.io/e2e-test-images/agnhost:2.32
    env:
    - name: SENTINEL
      value: "true"
    ports:
    - containerPort: 26379
  volumes:
  - name: data
    emptyDir: {}
23 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-service.json (generated, vendored, Normal file)
@@ -0,0 +1,23 @@
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "agnhost-primary",
    "labels": {
      "app": "agnhost",
      "role": "primary"
    }
  },
  "spec": {
    "ports": [
      {
        "port": 6379,
        "targetPort": "agnhost-server"
      }
    ],
    "selector": {
      "app": "agnhost",
      "role": "primary"
    }
  }
}
21 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/busybox-cronjob.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cronjob-test
spec:
  schedule: "*/1 * * * *"
  concurrencyPolicy: Allow
  suspend: false
  startingDeadlineSeconds: 30
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: test
            image: {{.BusyBoxImage}}
            args:
            - "/bin/true"
          restartPolicy: OnFailure
13 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/busybox-pod.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    app: busybox1
spec:
  containers:
  - image: {{.BusyBoxImage}}
    command: ["/bin/sh", "-c", "mkdir -p /root/foo/bar && echo 'foobar' > /root/foo/bar/foo.bar && sleep 3600"]
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
19 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,19 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: {{.HttpdNewImage}}
        ports:
        - containerPort: 80
18 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment2.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-deployment
spec:
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: {{.HttpdNewImage}}
        ports:
        - containerPort: 80
18 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment3.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-deployment
spec:
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: {{.HttpdImage}}
        ports:
        - containerPort: 80
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-rc.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: httpd-rc
spec:
  replicas: 1
  selector:
    run: httpd-rc
  template:
    metadata:
      labels:
        run: httpd-rc
    spec:
      containers:
      - image: {{.HttpdNewImage}}
        name: httpd-rc
12 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/pause-pod.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
  name: pause
  labels:
    name: pause
spec:
  containers:
  - name: pause
    image: {{.PauseImage}}
    ports:
    - containerPort: 80
18 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/pod-with-readiness-probe.yaml.in (generated, vendored, Normal file)
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
  name: httpd
  labels:
    name: httpd
spec:
  containers:
  - name: httpd
    image: {{.HttpdImage}}
    ports:
    - containerPort: 80
    readinessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      timeoutSeconds: 5
13 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/pod (generated, vendored, Normal file)
@@ -0,0 +1,13 @@
# Copy of pod.yaml without file extension for test
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
14 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/rbd-storage-class.yaml (generated, vendored, Normal file)
@@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/rbd
parameters:
  monitors: 127.0.0.1:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: "kube-system"
  pool: kube
  userId: kube
  userSecretName: ceph-secret-user
50 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/sample-device-plugin.yaml (generated, vendored, Normal file)
@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: sample-device-plugin-beta
  namespace: kube-system
  labels:
    k8s-app: sample-device-plugin
spec:
  selector:
    matchLabels:
      k8s-app: sample-device-plugin
  template:
    metadata:
      labels:
        k8s-app: sample-device-plugin
      annotations:
    spec:
      priorityClassName: system-node-critical
      tolerations:
      - operator: "Exists"
        effect: "NoExecute"
      - operator: "Exists"
        effect: "NoSchedule"
      volumes:
      - name: device-plugin
        hostPath:
          path: /var/lib/kubelet/device-plugins
      - name: plugins-registry-probe-mode
        hostPath:
          path: /var/lib/kubelet/plugins_registry
      - name: dev
        hostPath:
          path: /dev
      containers:
      - image: k8s.gcr.io/e2e-test-images/sample-device-plugin:1.3
        name: sample-device-plugin
        env:
        - name: PLUGIN_SOCK_DIR
          value: "/var/lib/kubelet/device-plugins"
        securityContext:
          privileged: true
        volumeMounts:
        - name: device-plugin
          mountPath: /var/lib/kubelet/device-plugins
        - name: plugins-registry-probe-mode
          mountPath: /var/lib/kubelet/plugins_registry
        - name: dev
          mountPath: /dev
  updateStrategy:
    type: RollingUpdate
86 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml (generated, vendored, Normal file)
@@ -0,0 +1,86 @@
# This DaemonSet was originally referenced from
# https://github.com/GoogleCloudPlatform/container-engine-accelerators/blob/master/daemonset.yaml

# The Dockerfile and other source for this daemonset are in
# https://github.com/GoogleCloudPlatform/cos-gpu-installer

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nvidia-driver-installer
  namespace: kube-system
  labels:
    k8s-app: nvidia-driver-installer
spec:
  selector:
    matchLabels:
      k8s-app: nvidia-driver-installer
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        name: nvidia-driver-installer
        k8s-app: nvidia-driver-installer
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: cloud.google.com/gke-accelerator
                operator: Exists
      tolerations:
      - operator: "Exists"
      hostNetwork: true
      hostPID: true
      volumes:
      - name: dev
        hostPath:
          path: /dev
      - name: vulkan-icd-mount
        hostPath:
          path: /home/kubernetes/bin/nvidia/vulkan/icd.d
      - name: nvidia-install-dir-host
        hostPath:
          path: /home/kubernetes/bin/nvidia
      - name: root-mount
        hostPath:
          path: /
      initContainers:
      # The COS GPU installer image version may be dependent on the version of COS being used.
      # Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/
      # and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine the COS GPU installer version for a given version of COS.

      # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.3 - suitable for COS M85 as per https://cloud.google.com/container-optimized-os/docs/release-notes#cos-85-13310-1209-3
      - image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.5
        name: nvidia-driver-installer
        resources:
          requests:
            cpu: 0.15
        securityContext:
          privileged: true
        env:
        - name: NVIDIA_INSTALL_DIR_HOST
          value: /home/kubernetes/bin/nvidia
        - name: NVIDIA_INSTALL_DIR_CONTAINER
          value: /usr/local/nvidia
        - name: VULKAN_ICD_DIR_HOST
          value: /home/kubernetes/bin/nvidia/vulkan/icd.d
        - name: VULKAN_ICD_DIR_CONTAINER
          value: /etc/vulkan/icd.d
        - name: ROOT_MOUNT_DIR
          value: /root
        volumeMounts:
        - name: nvidia-install-dir-host
          mountPath: /usr/local/nvidia
        - name: vulkan-icd-mount
          mountPath: /etc/vulkan/icd.d
        - name: dev
          mountPath: /dev
        - name: root-mount
          mountPath: /root
      containers:
      - image: "k8s.gcr.io/pause:3.7"
        name: pause
58 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cassandra/controller.yaml (generated, vendored, Normal file)
@@ -0,0 +1,58 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: cassandra
  # The labels will be applied automatically
  # from the labels in the pod template, if not set
  # labels:
    # app: cassandra
spec:
  replicas: 2
  # The selector will be applied automatically
  # from the labels in the pod template, if not set.
  # selector:
    # app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
        - command:
            - /run.sh
          resources:
            limits:
              cpu: 0.5
          env:
            - name: MAX_HEAP_SIZE
              value: 512M
            - name: HEAP_NEWSIZE
              value: 100M
            - name: CASSANDRA_SEED_PROVIDER
              value: "io.k8s.cassandra.KubernetesSeedProvider"
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          image: gcr.io/google-samples/cassandra:v13
          name: cassandra
          ports:
            - containerPort: 7000
              name: intra-node
            - containerPort: 7001
              name: tls-intra-node
            - containerPort: 7199
              name: jmx
            - containerPort: 9042
              name: cql
          volumeMounts:
            - mountPath: /cassandra_data
              name: data
      volumes:
        - name: data
          emptyDir: {}
11 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cassandra/pdb.yaml (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: cassandra-pdb
  labels:
    pdb: cassandra
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: cassandra
12 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cassandra/service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cassandra
  name: cassandra
spec:
  clusterIP: None
  ports:
    - port: 9042
  selector:
    app: cassandra
90 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cassandra/statefulset.yaml (generated, vendored, Normal file)
@@ -0,0 +1,90 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cassandra
spec:
  serviceName: cassandra
  replicas: 3
  selector:
    matchLabels:
      app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
        - name: cassandra
          image: gcr.io/google-samples/cassandra:v13
          imagePullPolicy: Always
          ports:
            - containerPort: 7000
              name: intra-node
            - containerPort: 7001
              name: tls-intra-node
            - containerPort: 7199
              name: jmx
            - containerPort: 9042
              name: cql
          resources:
            requests:
              cpu: "300m"
              memory: 1Gi
          securityContext:
            capabilities:
              add:
                - IPC_LOCK
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - -c
                  - nodetool drain
          env:
            - name: MAX_HEAP_SIZE
              value: 512M
            - name: HEAP_NEWSIZE
              value: 100M
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CASSANDRA_SEEDS
              value: "cassandra-0.cassandra.$(POD_NAMESPACE).svc.cluster.local"
            - name: CASSANDRA_CLUSTER_NAME
              value: "K8Demo"
            - name: CASSANDRA_DC
              value: "DC1-K8Demo"
            - name: CASSANDRA_RACK
              value: "Rack1-K8Demo"
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          readinessProbe:
            exec:
              command:
                - /bin/bash
                - -c
                - /ready-probe.sh
            initialDelaySeconds: 15
            timeoutSeconds: 5
          # These volume mounts are persistent. They are like inline claims,
          # but not exactly because the names need to match exactly one of
          # the stateful pod volumes.
          volumeMounts:
            - name: cassandra-data
              mountPath: /cassandra_data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  # do not use these in production until ssd GCEPersistentDisk or other ssd pd
  volumeClaimTemplates:
    - metadata:
        name: cassandra-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi
51 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml (generated, vendored, Normal file)
@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cassandra-test-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test-server
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
        - name: test-server
          image: k8s.gcr.io/cassandra-e2e-test:0.1
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
          readinessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 2
            periodSeconds: 2
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: tester-pdb
  labels:
    pdb: test-server
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: test-server
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test-server
  name: test-server
spec:
  ports:
  - port: 8080
  selector:
    app: test-server
  type: LoadBalancer
33 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cockroachdb/service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
  # This service only exists to create DNS entries for each pod in the stateful
  # set such that they can resolve each other's IP addresses. It does not
  # create a load-balanced ClusterIP and should not be used directly by clients
  # in most circumstances.
  name: cockroachdb
  labels:
    app: cockroachdb
  annotations:
    # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
    prometheus.io/scrape: "true"
    prometheus.io/path: "_status/vars"
    prometheus.io/port: "8080"
spec:
  ports:
  - port: 26257
    targetPort: 26257
    name: grpc
  - port: 8080
    targetPort: 8080
    name: http
  clusterIP: None
  selector:
    app: cockroachdb
  # This is needed to make the peer-finder work properly and to help avoid
  # edge cases where instance 0 comes up after losing its data and needs to
  # decide whether it should create a new cluster or try to join an existing
  # one. If it creates a new cluster when it should have joined an existing
  # one, we'd end up with two separate clusters listening at the same service
  # endpoint, which would be very bad.
  publishNotReadyAddresses: true
103 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cockroachdb/statefulset.yaml (generated, vendored, Normal file)
@@ -0,0 +1,103 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cockroachdb
spec:
  serviceName: "cockroachdb"
  replicas: 3
  selector:
    matchLabels:
      app: cockroachdb
  template:
    metadata:
      labels:
        app: cockroachdb
    spec:
      # Init containers are run only once in the lifetime of a pod, before
      # it's started up for the first time. It has to exit successfully
      # before the pod's main containers are allowed to start.
      # This particular init container does a DNS lookup for other pods in
      # the set to help determine whether or not a cluster already exists.
      # If any other pods exist, it creates a file in the cockroach-data
      # directory to pass that information along to the primary container that
      # has to decide what command-line flags to use when starting CockroachDB.
      # This only matters when a pod's persistent volume is empty - if it has
      # data from a previous execution, that data will always be used.
      initContainers:
      - name: bootstrap
        image: cockroachdb/cockroach-k8s-init:0.1
        imagePullPolicy: IfNotPresent
        args:
        - "-on-start=/on-start.sh"
        - "-service=cockroachdb"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: datadir
          mountPath: "/cockroach/cockroach-data"
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - cockroachdb
              topologyKey: kubernetes.io/hostname
      containers:
      - name: cockroachdb
        image: cockroachdb/cockroach:v1.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 26257
          name: grpc
        - containerPort: 8080
          name: http
        volumeMounts:
        - name: datadir
          mountPath: /cockroach/cockroach-data
        command:
          - "/bin/bash"
          - "-ecx"
          - |
            # The use of qualified `hostname -f` is crucial:
            # Other nodes aren't able to look up the unqualified hostname.
            CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
            # We only want to initialize a new cluster (by omitting the join flag)
            # if we're sure that we're the first node (i.e. index 0) and that
            # there aren't any other nodes running as part of the cluster that
            # this is supposed to be a part of (which indicates that a cluster
            # already exists and we should make sure not to create a new one).
            # It's fine to run without --join on a restart if there aren't any
            # other nodes.
            if [ ! "$(hostname)" == "cockroachdb-0" ] || \
               [ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
            then
              # We don't join cockroachdb in order to avoid a node attempting
              # to join itself, which currently doesn't work
              # (https://github.com/cockroachdb/cockroach/issues/9625).
              CRARGS+=("--join" "cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb")
            fi
            exec /cockroach/cockroach ${CRARGS[*]}
      # No pre-stop hook is required, a SIGTERM plus some time is all that's
      # needed for graceful shutdown of a node.
      terminationGracePeriodSeconds: 60
      volumes:
      - name: datadir
        persistentVolumeClaim:
          claimName: datadir
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes:
        - "ReadWriteOnce"
      resources:
        requests:
          storage: 1Gi
11 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/pdb.yaml (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: etcd-pdb
  labels:
    pdb: etcd
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: etcd
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/service.yaml (generated, vendored, Normal file)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: etcd
  labels:
    app: etcd
spec:
  ports:
  - port: 2380
    name: etcd-server
  - port: 2379
    name: etcd-client
  clusterIP: None
  selector:
    app: etcd
  publishNotReadyAddresses: true
178 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml (generated, vendored, Normal file)
@@ -0,0 +1,178 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: etcd
  labels:
    app: etcd
spec:
  serviceName: etcd
  replicas: 3
  selector:
    matchLabels:
      app: etcd
  template:
    metadata:
      name: etcd
      labels:
        app: etcd
    spec:
      containers:
      - name: etcd
        image: k8s.gcr.io/etcd:3.2.24
        imagePullPolicy: Always
        ports:
        - containerPort: 2380
          name: peer
        - containerPort: 2379
          name: client
        resources:
          requests:
            cpu: 100m
            memory: 512Mi
        env:
        - name: INITIAL_CLUSTER_SIZE
          value: "3"
        - name: SET_NAME
          value: etcd
        volumeMounts:
        - name: datadir
          mountPath: /var/run/etcd
        lifecycle:
          preStop:
            exec:
              command:
                - "/bin/sh"
                - "-ec"
                - |
                  EPS=""
                  for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
                  done

                  HOSTNAME=$(hostname)

                  member_hash() {
                      etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
                  }

                  echo "Removing ${HOSTNAME} from etcd cluster"

                  ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
                  if [ $? -eq 0 ]; then
                      # Remove everything otherwise the cluster will no longer scale-up
                      rm -rf /var/run/etcd/*
                  fi
        command:
          - "/bin/sh"
          - "-ec"
          - |
            HOSTNAME=$(hostname)

            # store member id into PVC for later member replacement
            collect_member() {
                while ! etcdctl member list &>/dev/null; do sleep 1; done
                etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
                exit 0
            }

            eps() {
                EPS=""
                for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
                done
                echo ${EPS}
            }

            member_hash() {
                etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
            }

            # re-joining after failure?
            if [ -e /var/run/etcd/default.etcd ]; then
                echo "Re-joining etcd member"
                member_id=$(cat /var/run/etcd/member_id)

                # re-join member
                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380
                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                    --listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd
            fi

            # etcd-SET_ID
            SET_ID=${HOSTNAME:5:${#HOSTNAME}}

            # adding a new member to existing cluster (assuming all initial pods are available)
            if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                export ETCDCTL_ENDPOINT=$(eps)

                # member already added?
                MEMBER_HASH=$(member_hash)
                if [ -n "${MEMBER_HASH}" ]; then
                    # the member hash exists but for some reason etcd failed;
                    # as the datadir has not been created, we can remove the member
                    # and retrieve a new hash
                    etcdctl member remove ${MEMBER_HASH}
                fi

                echo "Adding new member"
                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs

                if [ $? -ne 0 ]; then
                    echo "Exiting"
                    rm -f /var/run/etcd/new_member_envs
                    exit 1
                fi

                cat /var/run/etcd/new_member_envs
                source /var/run/etcd/new_member_envs

                collect_member &

                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                    --listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd \
                    --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                    --initial-cluster ${ETCD_INITIAL_CLUSTER} \
                    --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
            fi

            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                while true; do
                    echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
                    sleep 1s
                done
            done

            PEERS=""
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
            done

            collect_member &

            # join member
            exec etcd --name ${HOSTNAME} \
                --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                --listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                --listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
                --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                --initial-cluster-token etcd-cluster-1 \
                --initial-cluster ${PEERS} \
                --initial-cluster-state new \
                --data-dir /var/run/etcd/default.etcd
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes:
        - "ReadWriteOnce"
      resources:
        requests:
          # upstream recommended max is 700M
          storage: 1Gi
27 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/tester.yaml generated vendored Normal file
@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: etcd-test-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test-server
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
      - name: test-server
        image: k8s.gcr.io/etcd-statefulset-e2e-test:0.0
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 2
          periodSeconds: 2
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml generated vendored Normal file
@ -0,0 +1,16 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  name: galera
  labels:
    app: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  # *.galera.default.svc.cluster.local
  clusterIP: None
  selector:
    app: mysql
  publishNotReadyAddresses: true
87 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml generated vendored Normal file
@ -0,0 +1,87 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  serviceName: "galera"
  replicas: 3
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: install
        image: k8s.gcr.io/galera-install:0.1
        imagePullPolicy: Always
        args:
        - "--work-dir=/work-dir"
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
        - name: config
          mountPath: "/etc/mysql"
      - name: bootstrap
        image: debian:jessie
        command:
        - "/work-dir/peer-finder"
        args:
        - -on-start="/work-dir/on-start.sh"
        - "-service=galera"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
        - name: config
          mountPath: "/etc/mysql"
      containers:
      - name: mysql
        image: k8s.gcr.io/mysql-galera:e2e
        ports:
        - containerPort: 3306
          name: mysql
        - containerPort: 4444
          name: sst
        - containerPort: 4567
          name: replication
        - containerPort: 4568
          name: ist
        args:
        - --defaults-file=/etc/mysql/my-galera.cnf
        - --user=root
        readinessProbe:
          # TODO: If docker exec is buggy just use k8s.gcr.io/mysql-healthz:1.0
          exec:
            command:
            - sh
            - -c
            - "mysql -u root -e 'show databases;'"
          initialDelaySeconds: 15
          timeoutSeconds: 5
          successThreshold: 2
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/
        - name: config
          mountPath: /etc/mysql
      volumes:
      - name: config
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
13 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-upgrade/configmap.yaml generated vendored Normal file
@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    [mysqld]
    log-bin
  slave.cnf: |
    [mysqld]
    super-read-only
27 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-upgrade/service.yaml generated vendored Normal file
@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
  type: LoadBalancer
162 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-upgrade/statefulset.yaml generated vendored Normal file
@ -0,0 +1,162 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  serviceName: mysql
  replicas: 3
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7.15
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 1
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          timeoutSeconds: 1
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          if [[ -f xtrabackup_slave_info ]]; then
            mv xtrabackup_slave_info change_master_to.sql.in
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi

          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: default
      resources:
        requests:
          storage: 10Gi
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: mysql-pdb
  labels:
    pdb: mysql
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: mysql
51 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml generated vendored Normal file
@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-test-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test-server
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
      - name: test-server
        image: k8s.gcr.io/mysql-e2e-test:0.1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 2
          periodSeconds: 2
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: tester-pdb
  labels:
    pdb: test-server
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: test-server
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test-server
  name: test-server
spec:
  ports:
  - port: 8080
  selector:
    app: test-server
  type: LoadBalancer
13 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/service.yaml generated vendored Normal file
@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
34 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml generated vendored Normal file
@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: {{.NginxImageNew}}
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.beta.kubernetes.io/storage-class: nginx-sc
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
16 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml generated vendored Normal file
@ -0,0 +1,16 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  name: redis
  labels:
    app: redis
spec:
  ports:
  - port: 6379
    name: peer
  # *.redis.default.svc.cluster.local
  clusterIP: None
  selector:
    app: redis
  publishNotReadyAddresses: true
81 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml generated vendored Normal file
@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: rd
spec:
  serviceName: "redis"
  replicas: 3
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      initContainers:
      - name: install
        image: k8s.gcr.io/e2e-test-images/pets/redis-installer:1.5
        imagePullPolicy: Always
        args:
        - "--install-into=/opt"
        - "--work-dir=/work-dir"
        volumeMounts:
        - name: opt
          mountPath: "/opt"
        - name: workdir
          mountPath: "/work-dir"
      - name: bootstrap
        image: debian:jessie
        command:
        - "/work-dir/peer-finder"
        args:
        - -on-start="/work-dir/on-start.sh"
        - "-service=redis"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: opt
          mountPath: "/opt"
        - name: workdir
          mountPath: "/work-dir"
      containers:
      - name: redis
        image: debian:jessie
        ports:
        - containerPort: 6379
          name: peer
        command:
        - /opt/redis/redis-server
        args:
        - /opt/redis/redis.conf
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "/opt/redis/redis-cli -h $(hostname) ping"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /data
        - name: opt
          mountPath: /opt
      volumes:
      - name: opt
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
18 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/zookeeper/service.yaml generated vendored Normal file
@ -0,0 +1,18 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  name: zk
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: peer
  - port: 3888
    name: leader-election
  # *.zk.default.svc.cluster.local
  clusterIP: None
  selector:
    app: zk
  publishNotReadyAddresses: true
88 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml generated vendored Normal file
@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zoo
spec:
  serviceName: "zk"
  replicas: 3
  selector:
    matchLabels:
      app: zk
  template:
    metadata:
      labels:
        app: zk
    spec:
      initContainers:
      - name: install
        image: k8s.gcr.io/e2e-test-images/pets/zookeeper-installer:1.5
        imagePullPolicy: Always
        args:
        - "--install-into=/opt"
        - "--work-dir=/work-dir"
        volumeMounts:
        - name: opt
          mountPath: "/opt/"
        - name: workdir
          mountPath: "/work-dir"
      - name: bootstrap
        image: java:openjdk-8-jre
        command:
        - "/work-dir/peer-finder"
        args:
        - -on-start="/work-dir/on-start.sh"
        - "-service=zk"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: opt
          mountPath: "/opt"
        - name: workdir
          mountPath: "/work-dir"
        - name: datadir
          mountPath: "/tmp/zookeeper"
      containers:
      - name: zk
        image: openjdk:8-jre
        ports:
        - containerPort: 2888
          name: peer
        - containerPort: 3888
          name: leader-election
        command:
        - /opt/zookeeper/bin/zkServer.sh
        args:
        - start-foreground
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "/opt/zookeeper/bin/zkCli.sh ls /"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /tmp/zookeeper
        - name: opt
          mountPath: /opt
        # Mount the work-dir just for debugging
        - name: workdir
          mountPath: /work-dir
      volumes:
      - name: opt
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
19 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/OWNERS generated vendored Normal file
@ -0,0 +1,19 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - saad-ali
  - gnufied
  - jingxu97
  - jsafrane
  - msau42
  - xing-yang
reviewers:
  - saad-ali
  - gnufied
  - jingxu97
  - jsafrane
  - msau42
  - xing-yang
emeritus_approvers:
  - davidz627
  - rootfs
50 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml generated vendored Normal file
@ -0,0 +1,50 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: hellos.hello.example.com
spec:
  group: hello.example.com
  names:
    kind: Hello
    listKind: HelloList
    plural: hellos
    singular: hello
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: Hello is a specification for a Hello resource
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          spec:
            description: HelloSpec is the spec for a Hello resource
            properties:
              fileContents:
                type: string
              fileName:
                type: string
            required:
            - fileContents
            - fileName
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
57 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml generated vendored Normal file
@ -0,0 +1,57 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.5.0
    api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/2934
  creationTimestamp: null
  name: volumepopulators.populator.storage.k8s.io
spec:
  group: populator.storage.k8s.io
  names:
    kind: VolumePopulator
    listKind: VolumePopulatorList
    plural: volumepopulators
    singular: volumepopulator
  scope: Cluster
  versions:
  - additionalPrinterColumns:
    - jsonPath: .sourceKind
      name: SourceKind
      type: string
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: VolumePopulator represents the registration for a volume populator. VolumePopulators are cluster scoped.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          sourceKind:
            description: Kind of the data source this populator supports
            properties:
              group:
                type: string
              kind:
                type: string
            required:
            - group
            - kind
            type: object
        required:
        - sourceKind
        type: object
    served: true
    storage: true
    subresources: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
68 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml generated vendored Normal file
@ -0,0 +1,68 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: hello-account
  namespace: hello
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: hello-role
rules:
- apiGroups: [""]
  resources: [persistentvolumes]
  verbs: [get, list, watch, patch]
- apiGroups: [""]
  resources: [persistentvolumeclaims]
  verbs: [get, list, watch, patch, create, delete]
- apiGroups: [""]
  resources: [pods]
  verbs: [get, list, watch, create, delete]
- apiGroups: [storage.k8s.io]
  resources: [storageclasses]
  verbs: [get, list, watch]

- apiGroups: [hello.example.com]
  resources: [hellos]
  verbs: [get, list, watch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: hello-binding
subjects:
- kind: ServiceAccount
  name: hello-account
  namespace: hello
roleRef:
  kind: ClusterRole
  name: hello-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-populator
  namespace: hello
spec:
  selector:
    matchLabels:
      app: hello
  template:
    metadata:
      labels:
        app: hello
    spec:
      serviceAccount: hello-account
      containers:
      - name: hello
        image: k8s.gcr.io/sig-storage/hello-populator:v1.0.1
        imagePullPolicy: IfNotPresent
        args:
        - --mode=controller
        - --image-name=k8s.gcr.io/sig-storage/hello-populator:v1.0.1
        - --http-endpoint=:8080
        ports:
        - containerPort: 8080
          name: http-endpoint
          protocol: TCP
@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: volume-data-source-validator
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: volume-data-source-validator
rules:
- apiGroups: [populator.storage.k8s.io]
  resources: [volumepopulators]
  verbs: [get, list, watch]
- apiGroups: [""]
  resources: [persistentvolumeclaims]
  verbs: [get, list, watch]
- apiGroups: [""]
  resources: [events]
  verbs: [list, watch, create, update, patch]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: volume-data-source-validator
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: volume-data-source-validator
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: volume-data-source-validator
  apiGroup: rbac.authorization.k8s.io
@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: volume-data-source-validator
  namespace: kube-system
spec:
  serviceName: volume-data-source-validator
  replicas: 1
  selector:
    matchLabels:
      app: volume-data-source-validator
  template:
    metadata:
      labels:
        app: volume-data-source-validator
    spec:
      serviceAccount: volume-data-source-validator
      containers:
      - name: volume-data-source-validator
        image: k8s.gcr.io/sig-storage/volume-data-source-validator:v1.0.0
        args:
        - "--v=5"
        - "--leader-election=false"
        imagePullPolicy: Always
4 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/controller-role.yaml generated vendored Normal file
@ -0,0 +1,4 @@
# Replaced by individual roles for external-attacher, external-provisioner and external-snapshotter:
# - https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
# - https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
# - https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/rbac.yaml
93 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml generated vendored Normal file
@ -0,0 +1,93 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.3.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI attacher.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   attacher, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher
  # replace with non-default namespace name
  namespace: default

---
# Attacher must be able to work with PVs, CSINodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  # Secret permission is optional.
  # Enable it if you need value from secret.
  # For example, you have key `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters
  # see https://kubernetes-csi.github.io/docs/secrets-and-credentials.html
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io

---
# Attacher must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: external-attacher-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: external-attacher-cfg
  apiGroup: rbac.authorization.k8s.io
89 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml generated vendored Normal file
@ -0,0 +1,89 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI health monitor controller.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   health monitor controller, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-external-health-monitor-controller
  # replace with non-default namespace name
  namespace: default

---
# Health monitor controller must be able to work with PVs, PVCs, Nodes and Pods
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-health-monitor-controller-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-external-health-monitor-controller-role
subjects:
  - kind: ServiceAccount
    name: csi-external-health-monitor-controller
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-health-monitor-controller-runner
  apiGroup: rbac.authorization.k8s.io

---
# Health monitor controller must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: external-health-monitor-controller-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-external-health-monitor-controller-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-external-health-monitor-controller
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: external-health-monitor-controller-cfg
  apiGroup: rbac.authorization.k8s.io
129 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml generated vendored Normal file
@ -0,0 +1,129 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.0.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   provisioner, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner
  # replace with non-default namespace name
  namespace: default

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  # Access to volumeattachments is only needed when the CSI driver
  # has the PUBLISH_UNPUBLISH_VOLUME controller capability.
  # In that case, external-provisioner will watch volumeattachments
  # to determine when it is safe to delete a volume.
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
# Provisioner must be able to work with endpoints in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: external-provisioner-cfg
rules:
# Only one of the following rules for endpoints or leases is required based on
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]
# Permissions for CSIStorageCapacity are only needed when enabling the publishing
# of storage capacity information.
- apiGroups: ["storage.k8s.io"]
  resources: ["csistoragecapacities"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
# The GET permissions below are needed for walking up the ownership chain
# for CSIStorageCapacity. They are sufficient for deployment via
# StatefulSet (only needs to get Pod) and Deployment (needs to get
# Pod and then ReplicaSet to find the Deployment).
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get"]
- apiGroups: ["apps"]
  resources: ["replicasets"]
  verbs: ["get"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
94 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml generated vendored Normal file
@ -0,0 +1,94 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.3.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI resizer.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   resizer, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-resizer
  # replace with non-default namespace name
  namespace: default

---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-resizer-runner
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role
subjects:
  - kind: ServiceAccount
    name: csi-resizer
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-resizer-runner
  apiGroup: rbac.authorization.k8s.io

---
# Resizer must be able to work with endpoints in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: external-resizer-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-resizer
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: external-resizer-cfg
  apiGroup: rbac.authorization.k8s.io
88 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml generated vendored Normal file
@ -0,0 +1,88 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.0-rc1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
# for csi-driver-host-path master
# by ./update-hostpath.sh
#
# Together with the RBAC file for external-provisioner, this YAML file
# contains all RBAC objects that are necessary to run external CSI
# snapshotter.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - optionally rename the non-namespaced ClusterRole if there
#   are conflicts with other deployments

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-snapshotter

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # rename if there are conflicts
  name: external-snapshotter-runner
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  # Secret permission is optional.
  # Enable it if your driver needs secret.
  # For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass.
  # See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-role
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  # change the name also here if the ClusterRole gets renamed
  name: external-snapshotter-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default # TODO: replace with the namespace you want for your sidecar
  name: external-snapshotter-leaderelection
rules:
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-snapshotter-leaderelection
  namespace: default # TODO: replace with the namespace you want for your sidecar
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: default # TODO: replace with the namespace you want for your sidecar
roleRef:
  kind: Role
  name: external-snapshotter-leaderelection
  apiGroup: rbac.authorization.k8s.io
152 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml generated vendored Normal file
@ -0,0 +1,152 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-gce-pd-controller
spec:
  serviceName: "csi-gce-pd"
  replicas: 1
  selector:
    matchLabels:
      app: gcp-compute-persistent-disk-csi-driver
  template:
    metadata:
      labels:
        app: gcp-compute-persistent-disk-csi-driver
    spec:
      # Host network must be used for interaction with Workload Identity in GKE
      # since it replaces GCE Metadata Server with GKE Metadata Server. Remove
      # this requirement when issue is resolved and before any exposure of
      # metrics ports
      hostNetwork: true
      serviceAccountName: csi-gce-pd-controller-sa
      containers:
        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--metrics-address=:22014"
            - "--leader-election"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--timeout=300s"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: Always
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--feature-gates=Topology=true"
            - "--http-endpoint=:22011"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--timeout=250s"
            - "--extra-create-metadata"
            # - "--run-controller-service=false"  # disable the controller service of the CSI driver
            # - "--run-node-service=false"        # disable the node service of the CSI driver
            - "--leader-election"
            - "--default-fstype=ext4"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 22011
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--http-endpoint=:22012"
            - "--leader-election"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--timeout=250s"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 22012
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.1.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--http-endpoint=:22013"
            - "--leader-election"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--handle-volume-inuse-error=false"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 22013
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: gce-pd-driver
          image: k8s.gcr.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
          args:
            - "--v=5"
            - "--endpoint=unix:/csi/csi.sock"
          env:
            - name: GOOGLE_APPLICATION_CREDENTIALS
              value: "/etc/cloud-sa/cloud-sa.json"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: cloud-sa-volume
              readOnly: true
              mountPath: "/etc/cloud-sa"
      volumes:
        - name: socket-dir
          emptyDir: {}
        - name: cloud-sa-volume
          secret:
            secretName: cloud-sa
198 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml generated vendored Normal file
@ -0,0 +1,198 @@
##### Controller Service Account, Roles, Rolebindings
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-gce-pd-controller-sa

---
# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-provisioner-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-provisioner-role
  apiGroup: rbac.authorization.k8s.io

---
# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-attacher-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-attacher-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-attacher-role
  apiGroup: rbac.authorization.k8s.io

---

# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-resizer-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-resizer-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-resizer-role
  apiGroup: rbac.authorization.k8s.io

---
# xref: https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-snapshotter-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-snapshotter-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-snapshotter-role
  apiGroup: rbac.authorization.k8s.io
---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-leaderelection-role
  namespace: gce-pd-csi-driver
  labels:
    k8s-app: gcp-compute-persistent-disk-csi-driver
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
---
|
||||
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: csi-gce-pd-controller-leaderelection-binding
|
||||
namespace: gce-pd-csi-driver
|
||||
labels:
|
||||
k8s-app: gcp-compute-persistent-disk-csi-driver
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-gce-pd-controller-sa
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: csi-gce-pd-leaderelection-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
|
||||
# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: psp-csi-controller-driver-registrar-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-gce-pd-controller-sa
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: e2e-test-privileged-psp
|
||||
apiGroup: rbac.authorization.k8s.io
|
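These bindings can be spot-checked with kubectl's impersonation support once they are applied. A minimal sketch, assuming the controller ServiceAccount is deployed into the gce-pd-csi-driver namespace that the leader-election Role above uses (the namespace is an assumption, not stated for the ClusterRoleBindings themselves):

# should print "yes" if the provisioner binding took effect
kubectl auth can-i create persistentvolumes \
  --as=system:serviceaccount:gce-pd-csi-driver:csi-gce-pd-controller-sa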
115
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml
generated
vendored
Normal file
@ -0,0 +1,115 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-gce-pd-node
spec:
  selector:
    matchLabels:
      app: gcp-compute-persistent-disk-csi-driver
  template:
    metadata:
      labels:
        app: gcp-compute-persistent-disk-csi-driver
    spec:
      containers:
        - name: csi-driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock"
            - "--http-endpoint=:22013"
          lifecycle:
            preStop:
              exec:
                command: ["/bin/sh", "-c", "rm -rf /registration/pd.csi.storage.gke.io /registration/pd.csi.storage.gke.io-reg.sock"]
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          ports:
            - containerPort: 22013
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: gce-pd-driver
          securityContext:
            privileged: true
          image: k8s.gcr.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
          args:
            - "--v=5"
            - "--endpoint=unix:/csi/csi.sock"
            - "--run-controller-service=false"
          volumeMounts:
            - name: kubelet-dir
              mountPath: /var/lib/kubelet
              mountPropagation: "Bidirectional"
            - name: plugin-dir
              mountPath: /csi
            - name: device-dir
              mountPath: /dev
            # The following mounts are required to trigger host udevadm from
            # container
            - name: udev-rules-etc
              mountPath: /etc/udev
            - name: udev-rules-lib
              mountPath: /lib/udev
            - name: udev-socket
              mountPath: /run/udev
            - name: sys
              mountPath: /sys
      volumes:
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: kubelet-dir
          hostPath:
            path: /var/lib/kubelet
            type: Directory
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/
            type: DirectoryOrCreate
        - name: device-dir
          hostPath:
            path: /dev
            type: Directory
        # The following mounts are required to trigger host udevadm from
        # container
        - name: udev-rules-etc
          hostPath:
            path: /etc/udev
            type: Directory
        - name: udev-rules-lib
          hostPath:
            path: /lib/udev
            type: Directory
        - name: udev-socket
          hostPath:
            path: /run/udev
            type: Directory
        - name: sys
          hostPath:
            path: /sys
            type: Directory
      # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
      # See "special case". This will tolerate everything. Node component should
      # be scheduled on all nodes.
      tolerations:
        - operator: Exists
      nodeSelector:
        kubernetes.io/os: linux
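Once the DaemonSet rolls out, the registrar's work becomes visible in each node's CSINode object. A quick hedged check (<node-name> is a placeholder for one of your nodes):

# should include pd.csi.storage.gke.io among the registered drivers
kubectl get csinode <node-name> -o jsonpath='{.spec.drivers[*].name}'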
4
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md
generated
vendored
Normal file
@ -0,0 +1,4 @@
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.7.3/deploy/

Do not edit manually. Run ./update-hostpath.sh to refresh the content.
17
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml
generated
vendored
Normal file
@ -0,0 +1,17 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: hostpath.csi.k8s.io
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: hostpath.csi.k8s.io
    app.kubernetes.io/component: csi-driver
spec:
  # Supports persistent and ephemeral inline volumes.
  volumeLifecycleModes:
    - Persistent
    - Ephemeral
  # To determine at runtime which mode a volume uses, pod info and its
  # "csi.storage.k8s.io/ephemeral" entry are needed.
  podInfoOnMount: true
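Because the CSIDriver object advertises the Ephemeral mode, a pod can request a scratch volume inline, with no PVC involved. A minimal sketch of what that looks like; the pod and volume names are illustrative, not taken from these manifests:

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: inline-ephemeral-demo
spec:
  containers:
    - name: app
      image: k8s.gcr.io/e2e-test-images/busybox:1.29-1
      command: ["sleep", "3600"]
      volumeMounts:
        - name: scratch
          mountPath: /data
  volumes:
    - name: scratch
      csi:
        # inline ephemeral volume served by the hostpath driver
        driver: hostpath.csi.k8s.io
EOF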
394
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml
generated
vendored
Normal file
@ -0,0 +1,394 @@
# All of the individual sidecar RBAC roles get bound
# to this account.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: csi-hostpathplugin-sa
  namespace: default
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: serviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: attacher-cluster-role
  name: csi-hostpathplugin-attacher-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-attacher-runner
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: health-monitor-controller-cluster-role
  name: csi-hostpathplugin-health-monitor-controller-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-health-monitor-controller-runner
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: provisioner-cluster-role
  name: csi-hostpathplugin-provisioner-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-provisioner-runner
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: resizer-cluster-role
  name: csi-hostpathplugin-resizer-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-resizer-runner
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: snapshotter-cluster-role
  name: csi-hostpathplugin-snapshotter-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-snapshotter-runner
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: attacher-role
  name: csi-hostpathplugin-attacher-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-attacher-cfg
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: health-monitor-controller-role
  name: csi-hostpathplugin-health-monitor-controller-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-health-monitor-controller-cfg
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: provisioner-role
  name: csi-hostpathplugin-provisioner-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-provisioner-cfg
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: resizer-role
  name: csi-hostpathplugin-resizer-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-resizer-cfg
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: snapshotter-role
  name: csi-hostpathplugin-snapshotter-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-snapshotter-leaderelection
subjects:
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpathplugin
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: plugin
spec:
  serviceName: "csi-hostpathplugin"
  # One replica only:
  # Host path driver only works when everything runs
  # on a single node.
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: hostpath.csi.k8s.io
      app.kubernetes.io/part-of: csi-driver-host-path
      app.kubernetes.io/name: csi-hostpathplugin
      app.kubernetes.io/component: plugin
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: hostpath.csi.k8s.io
        app.kubernetes.io/part-of: csi-driver-host-path
        app.kubernetes.io/name: csi-hostpathplugin
        app.kubernetes.io/component: plugin
    spec:
      serviceAccountName: csi-hostpathplugin-sa
      containers:
        - name: hostpath
          image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
          args:
            - "--drivername=hostpath.csi.k8s.io"
            - "--v=5"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(KUBE_NODE_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          ports:
            - containerPort: 9898
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
              name: mountpoint-dir
            - mountPath: /var/lib/kubelet/plugins
              mountPropagation: Bidirectional
              name: plugins-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir
            - mountPath: /dev
              name: dev-dir

        - name: csi-external-health-monitor-controller
          image: k8s.gcr.io/sig-storage/csi-external-health-monitor-controller:v0.4.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi

        - name: node-driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir

        - name: liveness-probe
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.4.0
          args:
            - --csi-address=/csi/csi.sock
            - --health-port=9898

        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
            - --feature-gates=Topology=true
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
          args:
            - -v=5
            - -csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
          name: mountpoint-dir
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
        - hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
          name: plugins-dir
        - hostPath:
            # 'path' is where PV data is persisted on host.
            # Using /tmp is also possible, but then the PVs will not be
            # available after plugin container recreation or a host reboot.
            path: /var/lib/csi-hostpath-data/
            type: DirectoryOrCreate
          name: csi-data-dir
        - hostPath:
            path: /dev
            type: Directory
          name: dev-dir
13
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotclass.yaml
generated
vendored
Normal file
@ -0,0 +1,13 @@
# Usage of the v1 API implies that the cluster must have
# external-snapshotter v4.x installed.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpath-snapclass
    app.kubernetes.io/component: volumesnapshotclass
driver: hostpath.csi.k8s.io #csi-hostpath
deletionPolicy: Delete
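With this class installed, snapshotting an existing claim is a single object creation. A hedged sketch, assuming a PVC named demo-pvc already exists (the names are illustrative, not part of these manifests):

kubectl apply -f - <<EOF
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: demo-snapshot
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    # hypothetical existing claim to snapshot
    persistentVolumeClaimName: demo-pvc
EOF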
83
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml
generated
vendored
Normal file
@ -0,0 +1,83 @@
# WARNING: this is only for testing purposes. Do not install in a production
# cluster.
#
# This exposes the hostpath's Unix domain csi.sock as a TCP port to the
# outside world. The mapping from Unix domain socket to TCP is done
# by socat.
#
# This is useful for testing with csi-sanity or csc.

apiVersion: v1
kind: Service
metadata:
  name: hostpath-service
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpath-socat
    app.kubernetes.io/component: socat
spec:
  type: NodePort
  selector:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpath-socat
    app.kubernetes.io/component: socat
  ports:
    - port: 10000 # fixed port inside the pod, dynamically allocated port outside
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-socat
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpath-socat
    app.kubernetes.io/component: socat
spec:
  serviceName: "csi-hostpath-socat"
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: hostpath.csi.k8s.io
      app.kubernetes.io/part-of: csi-driver-host-path
      app.kubernetes.io/name: csi-hostpath-socat
      app.kubernetes.io/component: socat
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: hostpath.csi.k8s.io
        app.kubernetes.io/part-of: csi-driver-host-path
        app.kubernetes.io/name: csi-hostpath-socat
        app.kubernetes.io/component: socat
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app.kubernetes.io/instance
                    operator: In
                    values:
                      - hostpath.csi.k8s.io
              topologyKey: kubernetes.io/hostname
      containers:
        - name: socat
          image: alpine/socat:1.0.3
          args:
            - tcp-listen:10000,fork,reuseaddr
            - unix-connect:/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
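Once the NodePort is allocated, the driver's gRPC endpoint is reachable from outside the cluster. A hedged smoke test with the csc CLI; both the csc install and the node address NODE_IP are assumptions, not part of this manifest:

# look up the dynamically allocated outside port
NODE_PORT=$(kubectl get service hostpath-service \
  -o jsonpath='{.spec.ports[0].nodePort}')
# ask the driver for its plugin info over the socat-forwarded socket
csc identity plugin-info --endpoint "tcp://${NODE_IP}:${NODE_PORT}"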
31
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,31 @@
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp-csi-hostpath-role
subjects:
  # This list of ServiceAccounts intentionally covers everything that might
  # be needed. In practice, only some of these accounts are actually
  # used.
  - kind: ServiceAccount
    name: csi-attacher
    namespace: default
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: default
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: default
  - kind: ServiceAccount
    name: csi-resizer
    namespace: default
  - kind: ServiceAccount
    name: csi-external-health-monitor-controller
    namespace: default
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
roleRef:
  kind: ClusterRole
  name: e2e-test-privileged-psp
  apiGroup: rbac.authorization.k8s.io
35
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml
generated
vendored
Normal file
@ -0,0 +1,35 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-mockplugin-attacher
spec:
  selector:
    matchLabels:
      app: csi-mockplugin-attacher
  replicas: 1
  template:
    metadata:
      labels:
        app: csi-mockplugin-attacher
    spec:
      serviceAccountName: csi-mock
      containers:
        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
          args:
            - --v=5
            - --csi-address=$(ADDRESS)
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-mock
            type: DirectoryOrCreate
          name: socket-dir
34
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml
generated
vendored
Normal file
@ -0,0 +1,34 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-mockplugin-resizer
spec:
  selector:
    matchLabels:
      app: csi-mockplugin-resizer
  replicas: 1
  template:
    metadata:
      labels:
        app: csi-mockplugin-resizer
    spec:
      serviceAccountName: csi-mock
      containers:
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-mock
            type: DirectoryOrCreate
          name: socket-dir
36
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml
generated
vendored
Normal file
@ -0,0 +1,36 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-mockplugin-snapshotter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: csi-mockplugin-snapshotter
  template:
    metadata:
      labels:
        app: csi-mockplugin-snapshotter
    spec:
      serviceAccountName: csi-mock
      containers:
        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=false"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          securityContext:
            privileged: true
          imagePullPolicy: Always
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-mock
            type: DirectoryOrCreate
104
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
generated
vendored
Normal file
@ -0,0 +1,104 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-mockplugin
spec:
  selector:
    matchLabels:
      app: csi-mockplugin
  replicas: 1
  template:
    metadata:
      labels:
        app: csi-mockplugin
    spec:
      serviceAccountName: csi-mock
      containers:
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
          args:
            - "--csi-address=$(ADDRESS)"
            # Topology support is needed for the pod rescheduling test
            # ("storage capacity" in csi_mock_volume.go).
            - "--feature-gates=Topology=true"
            - "-v=5"
            # Needed for fsGroup support.
            - "--default-fstype=ext4"
            # We don't need much concurrency and having many goroutines
            # makes klog.Fatal during shutdown very long.
            - "--worker-threads=5"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
        - name: driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-mock/csi.sock
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
        - name: mock
          image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
          args:
            - "--drivername=mock.storage.k8s.io"
            - "--nodeid=$(KUBE_NODE_NAME)"
            - "--endpoint=/csi/csi.sock"
            - "--statedir=/tmp/csi-hotpath-data"
            - "-v=5" # enables gRPC call logging
          env:
            - name: CSI_ENDPOINT
              value: /csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /var/lib/kubelet/pods
              name: kubelet-pods-dir
            - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
              name: kubelet-csi-dir
            - mountPath: /dev
              name: dev-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-mock
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
          # mock driver doesn't make mounts and therefore doesn't need mount propagation.
          # mountPropagation: Bidirectional
          name: kubelet-pods-dir
        - hostPath:
            path: /var/lib/kubelet/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
          name: kubelet-csi-dir
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
        - hostPath:
            path: /dev
            type: Directory
          name: dev-dir
7
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml
generated
vendored
Normal file
@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: mock.storage.k8s.io
# Intentionally no spec. All values in the
# spec will be inserted dynamically by PatchCSIDeployment()
# in test/e2e/storage/utils/deployment.go.
109
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml
generated
vendored
Normal file
@ -0,0 +1,109 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-mockplugin
spec:
  selector:
    matchLabels:
      app: csi-mockplugin
  replicas: 1
  template:
    metadata:
      labels:
        app: csi-mockplugin
    spec:
      serviceAccountName: csi-mock
      containers:
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
          args:
            - "--csi-address=$(ADDRESS)"
            # Topology support is needed for the pod rescheduling test
            # ("storage capacity" in csi_mock_volume.go).
            - "--feature-gates=Topology=true"
            - "-v=5"
            - "--timeout=1m"
            # Needed for fsGroup support.
            - "--default-fstype=ext4"
            # We don't need much concurrency and having many goroutines
            # makes klog.Fatal during shutdown very long.
            - "--worker-threads=5"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
        - name: driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-mock/csi.sock
            - --timeout=1m
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
        - name: mock
          image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
          args:
            - -v=5
            - -nodeid=$(KUBE_NODE_NAME)
            - -endpoint=/csi/csi.sock
            - -proxy-endpoint=tcp://:9000
          env:
            - name: CSI_ENDPOINT
              value: /csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          ports:
            - containerPort: 9000
              name: socat
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
        # The busybox container is needed for running shell commands which
        # test for directories or create them. It needs additional privileges
        # for that.
        - name: busybox
          image: k8s.gcr.io/e2e-test-images/busybox:1.29-1
          securityContext:
            privileged: true
          command:
            - sleep
            - "100000"
          volumeMounts:
            - mountPath: /var/lib/kubelet/pods
              name: kubelet-pods-dir
            - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
              name: kubelet-csi-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-mock
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
          # mock driver doesn't make mounts and therefore doesn't need mount propagation.
          # mountPropagation: Bidirectional
          name: kubelet-pods-dir
        - hostPath:
            path: /var/lib/kubelet/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
          name: kubelet-csi-dir
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
87
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,87 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-mock

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-mock
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-mock
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-cluster-driver-registrar-role
subjects:
  - kind: ServiceAccount
    name: csi-mock
    namespace: default
roleRef:
  kind: ClusterRole
  name: cluster-driver-registrar-runner
  apiGroup: rbac.authorization.k8s.io

---
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp-csi-controller-driver-registrar-role
subjects:
  - kind: ServiceAccount
    name: csi-mock
    namespace: default
roleRef:
  kind: ClusterRole
  name: e2e-test-privileged-psp
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-resizer-role
subjects:
  - kind: ServiceAccount
    name: csi-mock
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-resizer-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-snapshotter-role
subjects:
  - kind: ServiceAccount
    name: csi-mock
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-snapshotter-runner
  apiGroup: rbac.authorization.k8s.io
7
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml
generated
vendored
Normal file
@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-mock-sc
provisioner: csi-mock
reclaimPolicy: Delete
volumeBindingMode: Immediate
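Because the class uses Immediate binding, the mock provisioner is asked for a volume as soon as a claim referencing it is created. A minimal sketch of such a claim (the claim name and size are illustrative):

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mock-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: csi-mock-sc
  resources:
    requests:
      storage: 1Gi
EOF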
141
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh
generated
vendored
Normal file
@ -0,0 +1,141 @@
#!/bin/sh

# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will update all sidecar RBAC files and the CSI hostpath
# deployment files such that they match what is in a hostpath driver
# release.
#
# Beware that this will wipe out all local modifications!

# Can be a tag or a branch.
script="$0"
hostpath_version="$1"

if ! [ "$hostpath_version" ]; then
    cat >&2 <<EOF
Usage: $0 <hostpath tag or branch name>

Required parameter is missing.
EOF
    exit 1
fi

set -xe
cd "$(dirname "$0")"

# Remove stale files.
rm -rf external-attacher external-provisioner external-resizer external-snapshotter external-health-monitor hostpath csi-driver-host-path

# Check out desired release.
git clone https://github.com/kubernetes-csi/csi-driver-host-path.git
(cd csi-driver-host-path && git checkout "$hostpath_version")
trap "rm -rf csi-driver-host-path" EXIT

# Main YAML files.
mkdir hostpath
cat >hostpath/README.md <<EOF
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/$hostpath_version/deploy/

Do not edit manually. Run $script to refresh the content.
EOF
cp -r csi-driver-host-path/deploy/kubernetes-latest/hostpath hostpath/
cat >hostpath/hostpath/e2e-test-rbac.yaml <<EOF
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp-csi-hostpath-role
subjects:
  # This list of ServiceAccounts intentionally covers everything that might
  # be needed. In practice, only some of these accounts are actually
  # used.
  - kind: ServiceAccount
    name: csi-attacher
    namespace: default
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: default
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: default
  - kind: ServiceAccount
    name: csi-resizer
    namespace: default
  - kind: ServiceAccount
    name: csi-external-health-monitor-controller
    namespace: default
  - kind: ServiceAccount
    name: csi-hostpathplugin-sa
    namespace: default
roleRef:
  kind: ClusterRole
  name: e2e-test-privileged-psp
  apiGroup: rbac.authorization.k8s.io
EOF

download () {
    project="$1"
    path="$2"
    tag="$3"
    rbac="$4"

    mkdir -p "$project/$path"
    url="https://github.com/kubernetes-csi/$project/raw/$tag/deploy/kubernetes/$path/$rbac"
    cat >"$project/$path/$rbac" <<EOF
# Do not edit, downloaded from $url
# for csi-driver-host-path $hostpath_version
# by $script
#
EOF
    curl --fail --location "$url" >>"$project/$path/$rbac"
}

# RBAC files for each sidecar.
# This relies on the convention that "external-something" has "csi-something" as image name.
# external-health-monitor is special, it has two images.
# The repository for each image is ignored.
images=$(grep -r '^ *image:.*csi' hostpath/hostpath | sed -e 's;.*image:.*/;;' | grep -v 'node-driver-registrar' | sort -u)
for image in $images; do
    tag=$(echo "$image" | sed -e 's/.*://')
    path=
    rbac="rbac.yaml"
    case $image in
        csi-external-*)
            # csi-external-health-monitor-agent:v0.2.0
            project=$(echo "$image" | sed -e 's/csi-\(.*\)-[^:]*:.*/\1/')
            path=$(echo "$image" | sed -e 's/csi-\([^:]*\):.*/\1/')
            ;;
        *)
            project=$(echo "$image" | sed -e 's/:.*//' -e 's/^csi/external/')
            case $project in
                external-snapshotter)
                    # Another special case...
                    path="csi-snapshotter"
                    rbac="rbac-csi-snapshotter.yaml"
                    ;;
            esac
            ;;
    esac
    download "$project" "$path" "$tag" "$rbac"
done

# Update the mock driver manifests, too.
grep -r image: hostpath/hostpath/csi-hostpath-plugin.yaml | while read -r image; do
    version=$(echo "$image" | sed -e 's/.*:\(.*\)/\1/')
    image=$(echo "$image" | sed -e 's/.*image: \([^:]*\).*/\1/')
    sed -i -e "s;$image:.*;$image:$version;" mock/*.yaml
done
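For instance, the v1.7.3 content vendored above would have been produced by invoking the script with that tag:

./update-hostpath.sh v1.7.3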