vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@@ -0,0 +1,7 @@
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"{{kube_user}}", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kube_proxy", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubecfg", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"client", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}


@@ -0,0 +1,93 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "cluster-autoscaler",
"namespace": "kube-system",
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
}
},
"spec": {
"hostNetwork": true,
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.1.1",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
"port": 8085
},
"initialDelaySeconds": 600,
"periodSeconds": 60
},
"command": [
"./run.sh",
"--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
"--balance-similar-node-groups=true",
"{{params}}"
],
"env": [
{
"name": "LOG_OUTPUT",
"value": "/var/log/cluster-autoscaler.log"
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "300Mi"
}
},
"volumeMounts": [
{{cloud_config_mount}}
{
"name": "ssl-certs",
"readOnly": true,
"mountPath": "/etc/ssl/certs"
},
{
"name": "usrsharecacerts",
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "logfile",
"mountPath": "/var/log/cluster-autoscaler.log",
"readOnly": false
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent"
}
],
"volumes": [
{{cloud_config_volume}}
{
"name": "ssl-certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
},
{
"name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"
}
},
{
"name": "logfile",
"hostPath": {
"path": "/var/log/cluster-autoscaler.log",
"type": "FileOrCreate"
}
}
],
"restartPolicy": "Always"
}
}
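
Note that {{params}}, {{cloud_config_mount}}, and {{cloud_config_volume}} are template placeholders, so the file above is not valid JSON until a startup script substitutes them. A minimal sketch of that kind of substitution using sed; the variable value and file paths are illustrative assumptions, not the actual startup logic:

# Render the templated manifest before the kubelet reads it.
# PARAMS and both file paths are assumptions for illustration.
PARAMS="--nodes=1:10:my-node-group"
sed -e "s@{{params}}@${PARAMS}@g" \
    -e "s@{{cloud_config_mount}}@@g" \
    -e "s@{{cloud_config_volume}}@@g" \
    cluster-autoscaler.manifest > /etc/kubernetes/manifests/cluster-autoscaler.manifest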


@@ -0,0 +1,111 @@
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
name: e2e-image-puller
namespace: kube-system
labels:
name: e2e-image-puller
spec:
containers:
- name: image-puller
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/busybox:1.24
# TODO: Replace this with a go script that pulls in parallel?
# Currently it takes ~5m to pull all e2e images, so this is OK, and
# fewer moving parts is always better.
# TODO: Replace the hardcoded image list with an autogen list; the list is
# currently hard-coded for static verification. It was generated via:
# grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
# sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
# We always want the subshell to exit 0 so this pod doesn't end up
# blocking tests in an Error state.
command:
- /bin/sh
- -c
- >
for i in
k8s.gcr.io/alpine-with-bash:1.0
k8s.gcr.io/apparmor-loader:0.1
k8s.gcr.io/busybox:1.24
k8s.gcr.io/dnsutils:e2e
k8s.gcr.io/e2e-net-amd64:1.0
k8s.gcr.io/echoserver:1.6
k8s.gcr.io/eptest:0.1
k8s.gcr.io/fakegitserver:0.1
k8s.gcr.io/galera-install:0.1
k8s.gcr.io/invalid-image:invalid-tag
k8s.gcr.io/iperf:e2e
k8s.gcr.io/jessie-dnsutils:e2e
k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
k8s.gcr.io/liveness:e2e
k8s.gcr.io/logs-generator:v0.1.0
k8s.gcr.io/mounttest:0.8
k8s.gcr.io/mounttest-user:0.5
k8s.gcr.io/mysql-galera:e2e
k8s.gcr.io/mysql-healthz:1.0
k8s.gcr.io/netexec:1.4
k8s.gcr.io/netexec:1.5
k8s.gcr.io/netexec:1.7
k8s.gcr.io/nettest:1.7
k8s.gcr.io/nginx:1.7.9
k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/node-problem-detector:v0.3.0
k8s.gcr.io/pause
k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
k8s.gcr.io/portforwardtester:1.2
k8s.gcr.io/redis-install-3.2.0:e2e
k8s.gcr.io/resource_consumer:beta4
k8s.gcr.io/resource_consumer/controller:beta4
gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
gcr.io/kubernetes-e2e-test-images/hostexec-amd64:1.1
k8s.gcr.io/servicelb:0.1
k8s.gcr.io/test-webserver:e2e
k8s.gcr.io/update-demo:kitten
k8s.gcr.io/update-demo:nautilus
k8s.gcr.io/volume-ceph:0.1
k8s.gcr.io/volume-gluster:0.2
k8s.gcr.io/volume-iscsi:0.1
k8s.gcr.io/volume-nfs:0.8
k8s.gcr.io/volume-rbd:0.1
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run/docker.sock
name: socket
- mountPath: /usr/bin/docker
name: docker
# Add a container that runs a health-check
- name: nethealth-check
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/kube-nethealth-amd64:1.0
command:
- /bin/sh
- -c
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: /var/run/docker.sock
type: Socket
name: socket
- hostPath:
path: /usr/bin/docker
type: File
name: docker
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetwork for true VM perf measurement, as well as to avoid cbr0 issues
hostNetwork: true
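
On the TODO above about pulling in parallel: a minimal sketch of that idea with xargs -P (image list abbreviated; the concurrency of 4 is an arbitrary assumption):

# Pull images four at a time instead of serially; exit 0 regardless,
# mirroring the manifest's goal of never blocking tests in an Error state.
printf '%s\n' \
  k8s.gcr.io/busybox:1.24 \
  k8s.gcr.io/nginx-slim:0.8 \
  | xargs -P 4 -n 1 docker pull >/dev/null; exit 0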


@@ -0,0 +1,91 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server{{ suffix }}",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.14') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
}
},
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.2.14') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
},
{ "name": "INITIAL_CLUSTER",
"value": "{{ etcd_cluster }}"
}
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": {{ port }},
"path": "/health"
},
"initialDelaySeconds": {{ liveness_probe_initial_delay }},
"timeoutSeconds": 15
},
"ports": [
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
},
{ "name": "clientport",
"containerPort": {{ port }},
"hostPort": {{ port }}
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{ "name": "varlogetcd",
"mountPath": "/var/log/etcd{{ suffix }}.log",
"readOnly": false
},
{ "name": "etc",
"mountPath": "{{ srv_kube_path }}",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd{{ suffix }}.log",
"type": "FileOrCreate"}
},
{ "name": "etc",
"hostPath": {
"path": "{{ srv_kube_path }}"}
}
]
}}
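
The liveness probe polls etcd's /health endpoint on the loopback client port. A manual equivalent, assuming the conventional etcd client port 2379 as the value of {{ port }}:

# Manual version of the liveness probe; 2379 is an assumed value for {{ port }}.
curl -fsS http://127.0.0.1:2379/health
# A healthy member responds with {"health": "true"}.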


@@ -0,0 +1,56 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.8-alpha.2
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: gcp-lb-controller
version: v0.9.8-alpha.2
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.2
livenessProbe:
httpGet:
path: /healthz
port: 8086
scheme: HTTP
initialDelaySeconds: 30
# healthz reaches out to GCE
periodSeconds: 30
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 5
name: l7-lb-controller
volumeMounts:
- mountPath: /etc/gce.conf
name: cloudconfig
readOnly: true
- mountPath: /var/log/glbc.log
name: logfile
readOnly: false
resources:
# Request is set to accommodate this pod alongside the other
# master components on a single core master.
# TODO: Make resource requirements depend on the size of the cluster
requests:
cpu: 10m
memory: 50Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /glbc --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf
type: FileOrCreate
name: cloudconfig
- hostPath:
path: /var/log/glbc.log
type: FileOrCreate
name: logfile
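
Since healthz reaches out to GCE, the probe above is deliberately tolerant: with periodSeconds: 30, failureThreshold: 5, and a 15s timeout per attempt, the kubelet allows roughly 2.5 minutes of consecutive failed checks before restarting the container.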


@@ -0,0 +1,38 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
# When updating version also bump it in:
# - test/kubemark/resources/manifests/kube-addon-manager.yaml
image: k8s.gcr.io/kube-addon-manager:v8.6
command:
- /bin/bash
- -c
- exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
volumes:
- hostPath:
path: /etc/kubernetes/
name: addons
- hostPath:
path: /var/log
name: varlog
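
The exec in the command line matters: it replaces the wrapper shell with the addon process itself, so kubelet signals (e.g. SIGTERM on pod deletion) reach it directly, while the 1>>... 2>&1 redirection appends both stdout and stderr to a host-mounted log that survives container restarts. The same wrapper pattern recurs in the control-plane manifests below.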


@@ -0,0 +1,136 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-apiserver"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-apiserver",
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
"resources": {
"requests": {
"cpu": "250m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 8080,
"path": "/healthz"
},
"initialDelaySeconds": {{liveness_probe_initial_delay}},
"timeoutSeconds": 15
},
"ports":[
{ "name": "https",
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{webhook_config_mount}}
{{webhook_authn_config_mount}}
{{audit_policy_config_mount}}
{{audit_webhook_config_mount}}
{{admission_controller_config_mount}}
{{image_policy_webhook_config_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
"readOnly": false},
{ "name": "auditlogfile",
"mountPath": "/var/log/kube-apiserver-audit.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/srv/pki",
"readOnly": true},
{ "name": "srvsshproxy",
"mountPath": "{{srv_sshproxy_path}}",
"readOnly": false}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{webhook_config_volume}}
{{webhook_authn_config_volume}}
{{audit_policy_config_volume}}
{{audit_webhook_config_volume}}
{{admission_controller_config_volume}}
{{image_policy_webhook_config_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-apiserver.log",
"type": "FileOrCreate"}
},
{ "name": "auditlogfile",
"hostPath": {
"path": "/var/log/kube-apiserver-audit.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/srv/pki"}
},
{ "name": "srvsshproxy",
"hostPath": {
"path": "{{srv_sshproxy_path}}"}
}
]
}}
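
As with the autoscaler manifest, the {{...}} env, mount, and volume placeholders mean this file only becomes valid JSON after rendering. A minimal post-render check, assuming the rendered output lands at the hypothetical path /etc/kubernetes/manifests/kube-apiserver.manifest:

# Confirm the rendered manifest is well-formed JSON before the kubelet picks it up.
# The path is an assumption for illustration.
python -m json.tool /etc/kubernetes/manifests/kube-apiserver.manifest >/dev/null \
  && echo "kube-apiserver manifest OK"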


@@ -0,0 +1,105 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-controller-manager"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{pv_recycler_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{{flexvolume_hostpath_mount}}
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{pv_recycler_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{{flexvolume_hostpath}}
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki"}
}
]
}}


@@ -0,0 +1,78 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
namespace: kube-system
# This annotation ensures that kube-proxy does not get evicted if the node
# supports the critical-pod-annotation-based priority scheme.
# Note that kube-proxy runs as a static pod, so this annotation does NOT have
# any effect on the rescheduler (the default scheduler and the rescheduler are
# not involved in scheduling kube-proxy).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
tier: node
component: kube-proxy
spec:
{{pod_priority}}
hostNetwork: true
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
containers:
- name: kube-proxy
image: {{pillar['kube_docker_registry']}}/kube-proxy:{{pillar['kube-proxy_docker_tag']}}
resources:
requests:
cpu: {{ cpurequest }}
command:
- /bin/sh
- -c
- exec kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" --oom-score-adj=-998 {{params}} 1>>/var/log/kube-proxy.log 2>&1
{{container_env}}
{{kube_cache_mutation_detector_env_name}}
{{kube_cache_mutation_detector_env_value}}
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: etc-ssl-certs
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-ca-certs
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
- mountPath: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
readOnly: false
- mountPath: /run/xtables.lock
name: iptableslock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
volumes:
- hostPath:
path: /usr/share/ca-certificates
name: usr-ca-certs
- hostPath:
path: /etc/ssl/certs
name: etc-ssl-certs
- hostPath:
path: /var/lib/kube-proxy/kubeconfig
type: FileOrCreate
name: kubeconfig
- hostPath:
path: /var/log
name: varlog
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: iptableslock
- name: lib-modules
hostPath:
path: /lib/modules
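
Because the pod tolerates every NoSchedule and NoExecute taint and ships as a static pod on each node, exactly one kube-proxy instance should appear per node. A quick check, assuming kubectl access to the cluster:

# One kube-proxy pod per node is expected; compare the two counts.
kubectl get pods -n kube-system -l component=kube-proxy -o wide
kubectl get nodes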


@@ -0,0 +1,64 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-scheduler"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-scheduler",
"image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
"resources": {
"requests": {
"cpu": "75m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10251,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{
"name": "logfile",
"mountPath": "/var/log/kube-scheduler.log",
"readOnly": false
},
{
"name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true
}
]
}
],
"volumes":[
{
"name": "srvkube",
"hostPath": {"path": "{{srv_kube_path}}"}
},
{
"name": "logfile",
"hostPath": {"path": "/var/log/kube-scheduler.log", "type": "FileOrCreate"}
}
]
}}


@@ -0,0 +1,36 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.3.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.3.1
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.3.1
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 10m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
type: FileOrCreate
name: logfile