ceph-csi/vendor/k8s.io/kubernetes/cluster/gce/manifests/e2e-image-puller.manifest
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
  name: e2e-image-puller
  namespace: kube-system
  labels:
    name: e2e-image-puller
spec:
  containers:
  - name: image-puller
    resources:
      requests:
        cpu: 100m
      limits:
        cpu: 100m
    image: k8s.gcr.io/busybox:1.24
    # TODO: Replace this with a go script that pulls in parallel?
    # Currently it takes ~5m to pull all e2e images, so this is OK, and
    # fewer moving parts is always better.
    # TODO: Replace the hardcoded image list with an autogen list; the list is
    # currently hard-coded for static verification. It was generated via:
    #   grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
    #     sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
    # We always want the subshell to exit 0 so this pod doesn't end up
    # blocking tests in an Error state.
    command:
    - /bin/sh
    - -c
    - >
      for i in
      k8s.gcr.io/alpine-with-bash:1.0
      k8s.gcr.io/apparmor-loader:0.1
      k8s.gcr.io/busybox:1.24
      k8s.gcr.io/dnsutils:e2e
      k8s.gcr.io/e2e-net-amd64:1.0
      k8s.gcr.io/echoserver:1.10
      k8s.gcr.io/eptest:0.1
      k8s.gcr.io/fakegitserver:0.1
      k8s.gcr.io/galera-install:0.1
      k8s.gcr.io/invalid-image:invalid-tag
      k8s.gcr.io/iperf:e2e
      k8s.gcr.io/jessie-dnsutils:e2e
      k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
      k8s.gcr.io/liveness:e2e
      k8s.gcr.io/logs-generator:v0.1.0
      k8s.gcr.io/mounttest:0.8
      k8s.gcr.io/mounttest-user:0.5
      k8s.gcr.io/mysql-galera:e2e
      k8s.gcr.io/mysql-healthz:1.0
      k8s.gcr.io/netexec:1.4
      k8s.gcr.io/netexec:1.5
      k8s.gcr.io/netexec:1.7
      k8s.gcr.io/nettest:1.7
      k8s.gcr.io/nginx:1.7.9
      k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1
      k8s.gcr.io/nginx-slim:0.7
      k8s.gcr.io/nginx-slim:0.8
      k8s.gcr.io/node-problem-detector:v0.3.0
      k8s.gcr.io/pause
      k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
      k8s.gcr.io/portforwardtester:1.2
      k8s.gcr.io/redis-install-3.2.0:e2e
      k8s.gcr.io/resource_consumer:beta4
      k8s.gcr.io/resource_consumer/controller:beta4
      gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
      gcr.io/kubernetes-e2e-test-images/hostexec-amd64:1.1
      k8s.gcr.io/servicelb:0.1
      k8s.gcr.io/test-webserver:e2e
      k8s.gcr.io/update-demo:kitten
      k8s.gcr.io/update-demo:nautilus
      gcr.io/kubernetes-e2e-test-images/volume-ceph:0.1
      gcr.io/kubernetes-e2e-test-images/volume-gluster:0.2
      gcr.io/kubernetes-e2e-test-images/volume-iscsi:0.1
      gcr.io/kubernetes-e2e-test-images/volume-nfs:0.8
      gcr.io/kubernetes-e2e-test-images/volume-rbd:0.1
      k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
      gcr.io/google_samples/gb-redisslave:nonexistent
      ; do echo $(date '+%X') pulling $i; crictl pull $i 1>/dev/null; done; exit 0;
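    # The loop above shells out to crictl on the node, which is why this
    # container runs privileged and mounts the runtime socket, the crictl
    # binary, and its config from the host (see volumeMounts below).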
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: {{ container_runtime_endpoint }}
      name: socket
    - mountPath: /usr/bin/crictl
      name: crictl
    - mountPath: /etc/crictl.yaml
      name: config
  # Add a container that runs a health-check
  - name: nethealth-check
    resources:
      requests:
        cpu: 100m
      limits:
        cpu: 100m
    image: k8s.gcr.io/kube-nethealth-amd64:1.0
    command:
    - /bin/sh
    - -c
    - "/usr/bin/nethealth || true"
  volumes:
  - hostPath:
      path: {{ container_runtime_endpoint }}
      type: Socket
    name: socket
  - hostPath:
      path: /home/kubernetes/bin/crictl
      type: File
    name: crictl
  - hostPath:
      path: /etc/crictl.yaml
      type: File
    name: config
  # This pod is really fire-and-forget.
  restartPolicy: OnFailure
  # This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
  hostNetwork: true