vendor update for CSI 0.3.0

This commit is contained in:
gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

44
vendor/k8s.io/kubernetes/cluster/gce/manifests/BUILD generated vendored Normal file
View File

@@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
pkg_tar(
name = "gce-master-manifests",
srcs = [":manifests"],
mode = "0644",
)
# if you update this, also update function kube::release::package_kube_manifests_tarball() in build/lib/release.sh
filegroup(
name = "manifests",
srcs = [
"abac-authz-policy.jsonl",
"cluster-autoscaler.manifest",
"e2e-image-puller.manifest",
"etcd.manifest",
"etcd-empty-dir-cleanup.yaml",
"glbc.manifest",
"kms-plugin-container.manifest",
"kube-addon-manager.yaml",
"kube-apiserver.manifest",
"kube-controller-manager.manifest",
"kube-proxy.manifest",
"kube-scheduler.manifest",
"rescheduler.manifest",
] + glob(["internal-*"]),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -7,6 +7,9 @@
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
},
"annotations": {
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
}
},
"spec": {
@@ -14,7 +17,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.1.1",
"image": "k8s.gcr.io/cluster-autoscaler:v1.3.0",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
@@ -25,7 +28,7 @@
},
"command": [
"./run.sh",
"--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
"--kubernetes=https://127.0.0.1:443",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
@@ -56,6 +59,11 @@
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "srvkube",
"readOnly": true,
"mountPath": "/etc/srv/kubernetes/cluster-autoscaler"
},
{
"name": "logfile",
"mountPath": "/var/log/cluster-autoscaler.log",
@@ -80,6 +88,12 @@
"path": "/usr/share/ca-certificates"
}
},
{
"name": "srvkube",
"hostPath": {
"path": "/etc/srv/kubernetes/cluster-autoscaler"
}
},
{
"name": "logfile",
"hostPath": {

View File

@@ -34,7 +34,7 @@ spec:
k8s.gcr.io/busybox:1.24
k8s.gcr.io/dnsutils:e2e
k8s.gcr.io/e2e-net-amd64:1.0
k8s.gcr.io/echoserver:1.6
k8s.gcr.io/echoserver:1.10
k8s.gcr.io/eptest:0.1
k8s.gcr.io/fakegitserver:0.1
k8s.gcr.io/galera-install:0.1
@@ -69,21 +69,23 @@ spec:
k8s.gcr.io/test-webserver:e2e
k8s.gcr.io/update-demo:kitten
k8s.gcr.io/update-demo:nautilus
k8s.gcr.io/volume-ceph:0.1
k8s.gcr.io/volume-gluster:0.2
k8s.gcr.io/volume-iscsi:0.1
k8s.gcr.io/volume-nfs:0.8
k8s.gcr.io/volume-rbd:0.1
gcr.io/kubernetes-e2e-test-images/volume-ceph:0.1
gcr.io/kubernetes-e2e-test-images/volume-gluster:0.2
gcr.io/kubernetes-e2e-test-images/volume-iscsi:0.1
gcr.io/kubernetes-e2e-test-images/volume-nfs:0.8
gcr.io/kubernetes-e2e-test-images/volume-rbd:0.1
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0;
; do echo $(date '+%X') pulling $i; crictl pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run/docker.sock
- mountPath: {{ container_runtime_endpoint }}
name: socket
- mountPath: /usr/bin/docker
name: docker
- mountPath: /usr/bin/crictl
name: crictl
- mountPath: /etc/crictl.yaml
name: config
# Add a container that runs a health-check
- name: nethealth-check
resources:
@@ -98,13 +100,17 @@ spec:
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: /var/run/docker.sock
path: {{ container_runtime_endpoint }}
type: Socket
name: socket
- hostPath:
path: /usr/bin/docker
path: /home/kubernetes/bin/crictl
type: File
name: docker
name: crictl
- hostPath:
path: /etc/crictl.yaml
type: File
name: config
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-empty-dir-cleanup
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: etcd-empty-dir-cleanup
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.18.0

View File

@@ -5,7 +5,8 @@
"name":"etcd-server{{ suffix }}",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
}
},
"spec":{
@@ -13,7 +14,7 @@
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.14') }}",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.18-0') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
@@ -22,20 +23,32 @@
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} {{ etcd_extra_args }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.2.14') }}"
"value": "{{ pillar.get('etcd_version', '3.2.18') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
},
{ "name": "INITIAL_CLUSTER",
"value": "{{ etcd_cluster }}"
},
{ "name": "LISTEN_PEER_URLS",
"value": "{{ etcd_protocol }}://{{ host_ip }}:{{ server_port }}"
},
{ "name": "INITIAL_ADVERTISE_PEER_URLS",
"value": "{{ etcd_protocol }}://{{ hostname }}:{{ server_port }}"
},
{ "name": "ETCD_CREDS",
"value": "{{ etcd_creds }}"
},
{ "name": "ETCD_SNAPSHOT_COUNT",
"value": "10000"
}
],
"livenessProbe": {
@@ -50,7 +63,7 @@
"ports": [
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
"hostPort": {{ server_port }}
},
{ "name": "clientport",
"containerPort": {{ port }},

View File

@@ -1,19 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.8-alpha.2
name: l7-lb-controller-v1.1.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: gcp-lb-controller
version: v0.9.8-alpha.2
version: v1.1.1
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.2
- image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.1.1
livenessProbe:
httpGet:
path: /healthz
@@ -44,7 +45,7 @@ spec:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /glbc --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
- 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf

View File

@@ -0,0 +1,8 @@
{
"name": "kms-plugin",
"image": "gcr.io/google-containers/k8s-cloud-kms-plugin:v0.1.1",
"command": ["/k8s-cloud-kms-plugin", "--key-uri={{kms_key_uri}}", "--path-to-unix-socket={{kms_path_to_socket}}", "--gce-config={{gce_conf_path}}", "--logtostderr", "2>\&1"],
"livenessProbe": { "httpGet": {"host": "127.0.0.1", "port": 8081, "path": "/healthz"}, "initialDelaySeconds": 3, "timeoutSeconds": 3},
"ports":[{ "name": "healthz", "containerPort": 8081, "hostPort": 8081}, { "name": "metrics", "containerPort": 8082, "hostPort": 8082}],
"volumeMounts": [{{cloud_config_mount}}, {{kms_socket_mount}}]
}

View File

@@ -5,6 +5,7 @@ metadata:
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
component: kube-addon-manager
spec:

View File

@@ -5,7 +5,8 @@
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
@@ -15,6 +16,7 @@
"spec":{
"hostNetwork": true,
"containers":[
{{kms_plugin_container}}
{
"name": "kube-apiserver",
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
@@ -47,6 +49,8 @@
"hostPort": 8080}
],
"volumeMounts": [
{{kms_socket_mount}}
{{encryption_provider_mount}}
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{webhook_config_mount}}
@@ -86,6 +90,8 @@
}
],
"volumes":[
{{kms_socket_volume}}
{{encryption_provider_volume}}
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{webhook_config_volume}}

View File

@@ -5,7 +5,8 @@
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",

View File

@@ -14,7 +14,7 @@ metadata:
tier: node
component: kube-proxy
spec:
{{pod_priority}}
priorityClassName: system-node-critical
hostNetwork: true
tolerations:
- operator: "Exists"

View File

@@ -5,7 +5,8 @@
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",

View File

@@ -1,19 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.3.1
name: rescheduler-v0.4.0
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.3.1
version: v0.4.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.3.1
- image: k8s.gcr.io/rescheduler:v0.4.0
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log