vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
filegroup(
name = "package-srcs",
@ -20,33 +20,19 @@ filegroup(
"//cluster/images/etcd/rollback:all-srcs",
"//cluster/images/hyperkube:all-srcs",
"//cluster/images/kubemark:all-srcs",
"//cluster/lib:all-srcs",
"//cluster/saltbase:all-srcs",
],
tags = ["automanaged"],
)
# All of the manifests that are expected to be in a "gci-trusty"
# subdir of the manifests tarball.
pkg_tar(
name = "_manifests-gci-trusty",
package_dir = "gci-trusty",
visibility = ["//visibility:private"],
deps = [
"//cluster/addons",
"//cluster/gce:gci-trusty-manifests",
"//cluster/gce/addons",
"//cluster/saltbase:gci-trusty-salt-manifests",
],
)
pkg_tar(
name = "manifests",
mode = "0644",
package_dir = "kubernetes",
package_dir = "kubernetes/gci-trusty",
deps = [
":_manifests-gci-trusty",
"//cluster/saltbase:salt-manifests",
"//cluster/addons",
"//cluster/gce:gce-master-manifests",
"//cluster/gce:gci-trusty-manifests",
"//cluster/gce/addons",
],
)
@ -55,7 +41,6 @@ sh_test(
name = "common_test",
srcs = ["common.sh"],
deps = [
"//cluster/lib",
"//hack/lib",
],
)
@ -64,7 +49,6 @@ sh_test(
name = "clientbin_test",
srcs = ["clientbin.sh"],
deps = [
"//cluster/lib",
"//hack/lib",
],
)
@ -73,7 +57,6 @@ sh_test(
name = "kube-util_test",
srcs = ["kube-util.sh"],
deps = [
"//cluster/lib",
"//hack/lib",
],
)

View File

@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
filegroup(
name = "addon-srcs",
@ -16,10 +16,10 @@ filegroup(
pkg_tar(
name = "addons",
extension = "tar.gz",
files = [
srcs = [
":addon-srcs",
],
extension = "tar.gz",
mode = "0644",
strip_prefix = ".",
)

View File

@ -1,6 +1,13 @@
### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
- Allow reconcile/ensure loop to work with resources under non-kube-system namespaces.
- Update kubectl to v1.9.3.
### Version 8.4 (Thu November 30 2017 zou nengren @zouyee)
- Update kubectl to v1.8.4.
### Version 6.5 (Wed October 15 2017 Daniel Kłobuszewski <danielmk@google.com>)
- Support for HA masters.
### Version 6.4-beta.2 (Mon June 12 2017 Jeff Grafton <jgrafton@google.com>)
- Update kubectl to v1.6.4.
- Refresh base images.

View File

@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGE=gcr.io/google-containers/kube-addon-manager
IMAGE=staging-k8s.gcr.io/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.4
KUBECTL_VERSION?=v1.8.4
VERSION=v8.6
KUBECTL_VERSION?=v1.9.3
ifeq ($(ARCH),amd64)
BASEIMAGE?=bashell/alpine-bash
@ -46,12 +46,12 @@ build:
docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)
push: build
gcloud docker -- push $(IMAGE)-$(ARCH):$(VERSION)
docker push $(IMAGE)-$(ARCH):$(VERSION)
ifeq ($(ARCH),amd64)
# Backward compatibility. TODO: deprecate this image tag
docker rmi $(IMAGE):$(VERSION) 2>/dev/null || true
docker tag $(IMAGE)-$(ARCH):$(VERSION) $(IMAGE):$(VERSION)
gcloud docker -- push $(IMAGE):$(VERSION)
docker push $(IMAGE):$(VERSION)
endif
clean:

View File

@ -0,0 +1,2 @@
reviewers:
- mrhohn

View File

@ -21,7 +21,6 @@ In a future release (after one year), Addon Manager may not respect it anymore. Addons that
have this label but without `addonmanager.kubernetes.io/mode=EnsureExists` will be
treated as "reconcile class addons" for now.
- Resources under `$ADDON_PATH` need to have either one of these two labels.
Meanwhile namespaced resources need to be in `kube-system` namespace.
Otherwise they will be omitted.
- The above label and namespace rule does not apply to `/opt/namespace.yaml` and
resources under `/etc/kubernetes/admission-controls/`. addon-manager will attempt to
@ -40,20 +39,20 @@ The `addon-manager` is built for multiple architectures.
```console
# Build for linux/amd64 (default)
$ make push ARCH=amd64
# ---> gcr.io/google-containers/kube-addon-manager-amd64:VERSION
# ---> gcr.io/google-containers/kube-addon-manager:VERSION (image with backwards-compatible naming)
# ---> staging-k8s.gcr.io/kube-addon-manager-amd64:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager:VERSION (image with backwards-compatible naming)
$ make push ARCH=arm
# ---> gcr.io/google-containers/kube-addon-manager-arm:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-arm:VERSION
$ make push ARCH=arm64
# ---> gcr.io/google-containers/kube-addon-manager-arm64:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-arm64:VERSION
$ make push ARCH=ppc64le
# ---> gcr.io/google-containers/kube-addon-manager-ppc64le:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-ppc64le:VERSION
$ make push ARCH=s390x
# ---> gcr.io/google-containers/kube-addon-manager-s390x:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-s390x:VERSION
```
If you don't want to push the images, run `make` or `make build` instead
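
For example, a local-only build for one architecture might look like this (a
sketch based on the Makefile targets above; the tag mirrors the push examples):

```console
# Build the image locally without pushing it to any registry:
$ make build ARCH=arm64
# ---> staging-k8s.gcr.io/kube-addon-manager-arm64:VERSION (local image only)
```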

View File

@ -124,12 +124,12 @@ function reconcile_addons() {
# Filter out the `configured` message to avoid noisy logging;
# `created`, `pruned`, and errors will still be logged.
log INFO "== Reconciling with deprecated label =="
${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
--prune=true --recursive | grep -v configured
log INFO "== Reconciling with addon-manager label =="
${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
--prune=true --recursive | grep -v configured
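
As the comments above note, `--prune` is what lets a reconcile cycle delete
addons that were removed from `${ADDON_PATH}`. A minimal sketch of the effect
(hypothetical resource names; the script additionally trims `configured` lines
with `grep -v configured`):

```console
$ kubectl apply -f ${ADDON_PATH} -l addonmanager.kubernetes.io/mode=Reconcile \
    --prune=true --recursive
serviceaccount/heapster created
deployment.apps/retired-addon pruned
```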
@ -139,7 +139,7 @@ function reconcile_addons() {
function ensure_addons() {
# Creating objects that already exist will fail.
# Filter out the `AlreadyExists` message to avoid noisy logging.
${KUBECTL} ${KUBECTL_OPTS} create --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
${KUBECTL} ${KUBECTL_OPTS} create -f ${ADDON_PATH} \
-l ${ADDON_MANAGER_LABEL}=EnsureExists --recursive 2>&1 | grep -v AlreadyExists
log INFO "== Kubernetes addon ensure completed at $(date -Is) =="
@ -155,7 +155,7 @@ function is_leader() {
fi
KUBE_CONTROLLER_MANAGER_LEADER=`${KUBECTL} -n kube-system get ep kube-controller-manager \
-o go-template=$'{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
| sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/'`
| sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}'`
# If there was any problem with getting the leader election results, var will
# be empty. Since it's better to have multiple addon managers than no addon
# managers at all, we're going to assume that we're the leader in such case.
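
The `awk -F'_' '{print $1}'` step added above keeps only the part of the
holder identity before the first underscore, since controller-managers
typically report an identity of the form `<hostname>_<suffix>`. A sketch with
a hypothetical value:

```console
# Hypothetical holderIdentity extracted by the sed expression above:
$ echo 'kube-master_8a3b12cd' | awk -F'_' '{print $1}'
kube-master
```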

View File

@ -20,6 +20,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
nodeSelector:
projectcalico.org/ds-ready: "true"
hostNetwork: true
@ -32,7 +33,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v2.6.1
image: gcr.io/projectcalico-org/node:v2.6.7
env:
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
@ -86,7 +87,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v1.11.0
image: gcr.io/projectcalico-org/cni:v1.11.2
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
@ -149,5 +150,10 @@ spec:
hostPath:
path: /etc/cni/net.d
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
# Make sure calico/node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists

View File

@ -16,8 +16,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
- image: k8s.gcr.io/cpvpa-amd64:v0.6.0
name: autoscaler
command:
- /cpvpa

View File

@ -16,13 +16,14 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
hostNetwork: true
serviceAccountName: calico
containers:
- image: calico/typha:v0.5.1
- image: gcr.io/projectcalico-org/typha:v0.5.6
name: calico-typha
ports:
- containerPort: 5473

View File

@ -16,8 +16,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2
name: autoscaler
command:
- /cluster-proportional-autoscaler

View File

@ -16,8 +16,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
- image: k8s.gcr.io/cpvpa-amd64:v0.6.0
name: autoscaler
command:
- /cpvpa

View File

@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: l7-default-backend
@ -24,7 +24,7 @@ spec:
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.3
image: k8s.gcr.io/defaultbackend:1.4
livenessProbe:
httpGet:
path: /healthz

View File

@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -70,8 +57,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -84,13 +72,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:
@ -101,7 +89,7 @@ spec:
memory: {{ nanny_memory }}
volumeMounts:
- name: heapster-config-volume
mountMath: /etc/config
mountPath: /etc/config
env:
- name: MY_POD_NAME
valueFrom:
@ -123,7 +111,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: eventer-nanny
resources:
limits:
@ -143,7 +131,7 @@ spec:
fieldPath: metadata.namespace
volumeMounts:
- name: eventer-config-volume
mountMath: /etc/config
mountPath: /etc/config
command:
- /pod_nanny
- --config-dir=/etc/config
@ -160,7 +148,6 @@ spec:
- name: heapster-config-volume
configMap:
name: heapster-config
volumes:
- name: eventer-config-volume
configMap:
name: eventer-config

View File

@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -70,9 +57,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -86,13 +73,13 @@ spec:
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --sink=gcm:?metrics=autoscaling
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:
@ -125,7 +112,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: eventer-nanny
resources:
limits:

View File

@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5|float -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -70,8 +57,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -84,13 +72,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:
@ -123,7 +111,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: eventer-nanny
resources:
limits:

View File

@ -22,6 +22,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@ -29,7 +30,7 @@ spec:
operator: "Exists"
containers:
- name: influxdb
image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3
image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3
resources:
limits:
cpu: 100m
@ -46,7 +47,7 @@ spec:
- name: influxdb-persistent-storage
mountPath: /data
- name: grafana
image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3
image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3
env:
resources:
# keep request = limit to keep this container in guaranteed class
@ -71,7 +72,7 @@ spec:
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
value: Admin
- name: GF_SERVER_ROOT_URL
value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/
ports:
- name: ui
containerPort: 3000

View File

@ -1,14 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -55,8 +44,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -71,7 +61,7 @@ spec:
- --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110
# BEGIN_PROMETHEUS_TO_SD
- name: prom-to-sd
image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
@ -89,7 +79,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
# END_PROMETHEUS_TO_SD
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:

View File

@ -1,14 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -55,8 +44,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -68,7 +58,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:

View File

@ -0,0 +1,6 @@
approvers:
- floreks
- maciaszczykm
reviewers:
- floreks
- maciaszczykm

View File

@ -7,7 +7,7 @@ metadata:
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
@ -27,9 +27,10 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
resources:
limits:
cpu: 100m

View File

@ -7,10 +7,6 @@ metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
@ -26,6 +22,10 @@ rules:
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding

View File

@ -8,3 +8,14 @@ metadata:
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
# Allows editing the resource and makes sure it is created first.
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque

View File

@ -36,7 +36,7 @@ spec:
hostPath:
path: /dev
containers:
- image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:0e79da6998a61257585e0d3fb5848240129f0fa5b4ad972dfed4049448093c33"
- image: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
name: nvidia-gpu-device-plugin
resources:

View File

@ -58,7 +58,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns-autoscaler
@ -68,6 +68,9 @@ metadata:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: kube-dns-autoscaler
template:
metadata:
labels:
@ -75,9 +78,10 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
resources:
requests:
cpu: "20m"

View File

@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@ -18,9 +18,10 @@ metadata:
labels:
k8s-app: etcd-empty-dir-cleanup
spec:
priorityClassName: system-node-critical
serviceAccountName: etcd-empty-dir-cleanup
hostNetwork: true
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: gcr.io/google-containers/etcd-empty-dir-cleanup:3.0.14.0
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.1.10.0

View File

@ -8,7 +8,7 @@ is a graphical interface for viewing and querying the logs stored in
Elasticsearch.
**Note:** this addon should **not** be used as-is in production. This is
an example and you should treat is as such. Please see at least the
an example and you should treat it as such. Please see at least the
[Security](#security) and the [Storage](#storage) sections for more
information.
@ -19,9 +19,9 @@ a Deployment, but allows for maintaining state on storage volumes.
### Security
Elasticsearch has capabilities to enable authorization using
Elasticsearch has capabilities to enable authorization using the
[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
in Elasticsearch and Kibana configurations. It can also be set via
in Elasticsearch and Kibana configurations. It can also be set via the
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
follow [official documentation][setupCreds] to set up credentials in
Elasticsearch and Kibana. Don't forget to propagate those credentials also to
@ -31,7 +31,7 @@ and [Secrets][secret] to store credentials in the Kubernetes apiserver.
### Initialization
The Elasticsearch Statefulset manifest specifies that there shall be an
The Elasticsearch StatefulSet manifest specifies that there shall be an
[init container][initContainer] executing before Elasticsearch containers
themselves, in order to ensure that the kernel state variable
`vm.max_map_count` is at least 262144, since this is a requirement of
@ -61,7 +61,7 @@ Learn more in the [official Kubernetes documentation][k8sElasticsearchDocs].
Since Fluentd talks to the Elasticsearch service inside the cluster, instances
on masters won't work, because masters have no kube-proxy. Don't mark masters
with a label mentioned in the previous paragraph or add a taint on them to
with the label mentioned in the previous paragraph or add a taint on them to
avoid Fluentd pods scheduling there.
[fluentd]: http://www.fluentd.org/
@ -71,7 +71,7 @@ avoid Fluentd pods scheduling there.
[setupCreds]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords
[fluentdCreds]: https://github.com/uken/fluent-plugin-elasticsearch#user-password-path-scheme-ssl_verify
[fluentdEnvVar]: https://docs.fluentd.org/v0.12/articles/faq#how-can-i-use-environment-variables-to-configure-parameters-dynamically
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configmap/
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
[secret]: https://kubernetes.io/docs/concepts/configuration/secret/
[statefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset
[initContainer]: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/

View File

@ -8,8 +8,7 @@ load(
go_binary(
name = "es-image",
importpath = "k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image",
library = ":go_default_library",
embed = [":go_default_library"],
)
go_library(

View File

@ -14,7 +14,7 @@
.PHONY: binary build push
PREFIX = gcr.io/google-containers
PREFIX = staging-k8s.gcr.io
IMAGE = elasticsearch
TAG = v5.6.4
@ -22,7 +22,7 @@ build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
push:
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
docker push $(PREFIX)/$(IMAGE):$(TAG)
binary:
CGO_ENABLED=0 GOOS=linux go build -a -ldflags "-w" elasticsearch_logging_discovery.go

View File

@ -86,7 +86,7 @@ func main() {
serviceName = "elasticsearch-logging"
}
// Look for endpoints associated with the Elasticsearch loggging service.
// Look for endpoints associated with the Elasticsearch logging service.
// First wait for the service to become available.
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
elasticsearch, err = client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})

View File

@ -47,7 +47,7 @@ roleRef:
apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging
@ -73,7 +73,7 @@ spec:
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: gcr.io/google-containers/elasticsearch:v5.6.4
- image: k8s.gcr.io/elasticsearch:v5.6.4
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class

View File

@ -1,6 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-es-config-v0.1.4
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
@ -101,39 +111,46 @@ data:
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
type tail
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag kubernetes.*
tag raw.kubernetes.*
format json
read_from_head true
format multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
@ -143,8 +160,10 @@ data:
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type tail
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
@ -154,7 +173,8 @@ data:
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@ -170,7 +190,8 @@ data:
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -184,7 +205,8 @@ data:
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -198,7 +220,8 @@ data:
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -212,7 +235,8 @@ data:
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -226,7 +250,8 @@ data:
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -240,7 +265,8 @@ data:
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -254,7 +280,8 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -268,7 +295,8 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -280,34 +308,61 @@ data:
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type systemd
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
<storage>
@type local
persistent true
</storage>
read_from_head true
tag docker
</source>
<source>
type systemd
@id journald-container-runtime
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag container-runtime
</source>
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
<storage>
@type local
persistent true
</storage>
read_from_head true
tag kubelet
</source>
<source>
type systemd
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
pos_file /var/log/gcp-journald-node-problem-detector.pos
<storage>
@type local
persistent true
</storage>
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
type forward
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
@ -342,32 +397,32 @@ data:
host ${hostname}
</labels>
</source>
output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
type kubernetes_metadata
@type kubernetes_metadata
</filter>
<match **>
type elasticsearch
log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limits.
buffer_chunk_limit 2M
buffer_queue_limit 8
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>
metadata:
name: fluentd-es-config-v0.1.1
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File

@ -45,37 +45,38 @@ roleRef:
name: fluentd-es
apiGroup: ""
---
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.0.2
name: fluentd-es-v2.0.4
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.2
version: v2.0.4
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.0.2
version: v2.0.4
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v2.0.2
version: v2.0.4
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2
image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
@ -112,4 +113,4 @@ spec:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-es-config-v0.1.1
name: fluentd-es-config-v0.1.4

View File

@ -21,6 +21,8 @@
FROM debian:stretch-slim
ARG DEBIAN_FRONTEND=noninteractive
COPY clean-apt /usr/bin
COPY clean-install /usr/bin
COPY Gemfile /Gemfile
@ -29,7 +31,7 @@ COPY Gemfile /Gemfile
# 2. Install fluentd via ruby.
# 3. Remove build dependencies.
# 4. Cleanup leftover caches & files.
RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev" \
RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev libffi-dev" \
&& clean-install $BUILD_DEPS \
ca-certificates \
libjemalloc1 \

View File

@ -1,10 +1,11 @@
source 'https://rubygems.org'
gem 'fluentd', '~>0.12.32'
gem 'activesupport', '~>4.2.6'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>0.27.0'
gem 'fluent-plugin-elasticsearch', '~>1.9.5'
gem 'fluent-plugin-systemd', '~>0.0.8'
gem 'fluentd', '<=1.1.0'
gem 'activesupport', '~>5.1.4'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
gem 'fluent-plugin-elasticsearch', '~>2.4.1'
gem 'fluent-plugin-systemd', '~>0.3.1'
gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'fluent-plugin-multi-format-parser', '~>0.1.1'
gem 'oj', '~>2.18.1'
gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
gem 'oj', '~>3.3.1.0'

View File

@ -14,12 +14,12 @@
.PHONY: build push
PREFIX = gcr.io/google-containers
PREFIX = staging-k8s.gcr.io
IMAGE = fluentd-elasticsearch
TAG = v2.0.2
TAG = v2.0.4
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
push:
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
docker push $(PREFIX)/$(IMAGE):$(TAG)

View File

@ -4,11 +4,11 @@ that collects Docker container log files using [Fluentd][fluentd]
and sends them to an instance of [Elasticsearch][elasticsearch].
This image is designed to be used as part of the [Kubernetes][kubernetes]
cluster bring up process. The image resides at GCR under the name
[gcr.io/google-containers/fluentd-elasticsearch][image].
[k8s.gcr.io/fluentd-elasticsearch][image].
[fluentd]: http://www.fluentd.org/
[elasticsearch]: https://www.elastic.co/products/elasticsearch
[kubernetes]: https://kubernetes.io
[image]: https://gcr.io/google-containers/fluentd-elasticsearch
[image]: https://k8s.gcr.io/fluentd-elasticsearch
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md?pixel)]()

View File

@ -2,7 +2,7 @@
# Do not collect fluentd's own logs to avoid infinite loops.
<match fluent.**>
type null
@type null
</match>
@include /etc/fluent/config.d/*.conf

View File

@ -1,4 +1,4 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@ -30,7 +30,7 @@ spec:
- name: ELASTICSEARCH_URL
value: http://elasticsearch-logging:9200
- name: SERVER_BASEPATH
value: /api/v1/proxy/namespaces/kube-system/services/kibana-logging
value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED

View File

@ -8,4 +8,71 @@ they can be searched, viewed, and analyzed.
Learn more at: https://kubernetes.io/docs/tasks/debug-application-cluster/logging-stackdriver
## Troubleshooting
In Kubernetes clusters running version 1.10.0 or later, the fluentd-gcp DaemonSet can be
manually scaled. This is useful, e.g., when applications running in the cluster
are sending a large volume of logs (i.e. over 100kB/s), causing fluentd-gcp to
fail with OutOfMemory errors. Conversely, if the applications aren't generating
a lot of logs, it may be useful to reduce the amount of resources consumed by
fluentd-gcp, making these resources available to other applications. To learn
more about Kubernetes resource requests and limits, see the official
documentation ([CPU][cpu], [memory][memory]). The amount of resources requested
by fluentd-gcp on every node in the cluster can be fetched by running the
following command:
```
$ kubectl get ds -n kube-system -l k8s-app=fluentd-gcp \
-o custom-columns=NAME:.metadata.name,\
CPU_REQUEST:.spec.template.spec.containers[].resources.requests.cpu,\
MEMORY_REQUEST:.spec.template.spec.containers[].resources.requests.memory,\
MEMORY_LIMIT:.spec.template.spec.containers[].resources.limits.memory
```
This will display an output similar to the following:
```
NAME CPU_REQUEST MEMORY_REQUEST MEMORY_LIMIT
fluentd-gcp-v2.0.15 100m 200Mi 300Mi
```
In order to change those values, a [ScalingPolicy][scalingPolicy] needs to be
defined. Currently, only base values are supported (no automatic scaling). The
ScalingPolicy can be created using kubectl, e.g. to set the CPU request to 101m,
the memory request to 150Mi, and the memory limit to 400Mi:
```
$ cat <<EOF | kubectl apply -f -
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
name: fluentd-gcp-scaling-policy
namespace: kube-system
spec:
containers:
- name: fluentd-gcp
resources:
requests:
- resource: cpu
base: 101m
- resource: memory
base: 150Mi
limits:
- resource: memory
base: 400Mi
EOF
```
To remove the override and go back to the GKE-provided defaults, simply delete
the ScalingPolicy:
```
$ kubectl delete -n kube-system scalingpolicies.scalingpolicy.kope.io/fluentd-gcp-scaling-policy
```
[cpu]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
[memory]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
[scalingPolicy]: https://github.com/justinsb/scaler
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-gcp/README.md?pixel)]()

View File

@ -29,11 +29,11 @@ subjects:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: event-exporter-v0.1.7
name: event-exporter-v0.1.8
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.1.7
version: v0.1.8
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@ -42,17 +42,18 @@ spec:
metadata:
labels:
k8s-app: event-exporter
version: v0.1.7
version: v0.1.8
spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: gcr.io/google-containers/event-exporter:v0.1.7
image: k8s.gcr.io/event-exporter:v0.1.8
command:
- '/event-exporter'
- /event-exporter
- -sink-opts="-location={{ event_exporter_zone }}"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons

View File

@ -46,33 +46,42 @@ data:
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
type tail
@type tail
path /var/log/containers/*.log
pos_file /var/log/gcp-containers.log.pos
tag reform.*
read_from_head true
format multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
format none
</source>
<filter reform.**>
type parser
@type parser
key_name message
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</filter>
<filter reform.**>
@type parser
format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
reserve_data true
suppress_parse_error_log true
emit_invalid_record_to_error false
key_name log
</filter>
<match reform.**>
type record_reformer
@type record_reformer
enable_ruby true
tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
</match>
@ -89,21 +98,10 @@ data:
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/gcp-salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/gcp-startupscript.log.pos
@ -113,8 +111,9 @@ data:
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type tail
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/gcp-docker.log.pos
@ -124,7 +123,7 @@ data:
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@ -140,7 +139,7 @@ data:
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -154,7 +153,7 @@ data:
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -168,7 +167,7 @@ data:
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -182,7 +181,7 @@ data:
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -196,7 +195,7 @@ data:
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -210,7 +209,7 @@ data:
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -224,7 +223,7 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -238,7 +237,7 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -250,8 +249,10 @@ data:
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Keep this for compatibility, remove this after
# cri container runtime rolls out.
<source>
type systemd
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
read_from_head true
@ -259,7 +260,15 @@ data:
</source>
<source>
type systemd
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
pos_file /var/log/gcp-journald-container-runtime.pos
read_from_head true
tag container-runtime
</source>
<source>
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
read_from_head true
@ -267,23 +276,13 @@ data:
</source>
<source>
type systemd
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
pos_file /var/log/gcp-journald-node-problem-detector.pos
read_from_head true
tag node-problem-detector
</source>
monitoring.conf: |-
# Prometheus monitoring
<source>
@type prometheus
port 31337
</source>
<source>
@type prometheus_monitor
</source>
# This source is used to acquire approximate process start timestamp,
# which purpose is explained before the corresponding output plugin.
<source>
@ -356,6 +355,8 @@ data:
# Collect metrics in Prometheus registry about plugin activity.
enable_monitoring true
monitoring_type prometheus
# Allow log entries from multiple containers to be sent in the same request.
split_logs_by_tag false
# Set the buffer type to file to improve the reliability and reduce the memory consumption
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
@ -376,6 +377,11 @@ data:
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
labels {
# The logging backend will take responsibility for double writing to
# the necessary resource types when this label is set.
"logging.googleapis.com/k8s_compatibility": "true"
}
</match>
# Keep a smaller buffer here since these logs are less important than the user's
@ -386,6 +392,8 @@ data:
detect_json true
enable_monitoring true
monitoring_type prometheus
# Allow entries from multiple system logs to be sent in the same request.
split_logs_by_tag false
detect_subservice false
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
@ -396,9 +404,14 @@ data:
max_retry_wait 30
disable_retry_limit
num_threads 2
labels {
# The logging backend will take responsibility for double writing to
# the necessary resource types when this label is set.
"logging.googleapis.com/k8s_compatibility": "true"
}
</match>
metadata:
name: fluentd-gcp-config-v1.2.3
name: fluentd-gcp-config-v1.2.4
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File

@ -1,13 +1,13 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v2.0.10
name: fluentd-gcp-v3.0.0
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v2.0.10
version: v3.0.0
spec:
updateStrategy:
type: RollingUpdate
@ -16,27 +16,19 @@ spec:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v2.0.10
version: v3.0.0
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-gcp
dnsPolicy: Default
containers:
- name: fluentd-gcp
image: gcr.io/google-containers/fluentd-gcp:2.0.10
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
resources:
limits:
memory: 300Mi
requests:
cpu: 100m
memory: 200Mi
image: gcr.io/stackdriver-agents/stackdriver-logging-agent:{{ fluentd_gcp_version }}
volumeMounts:
- name: varlog
mountPath: /var/log
@ -47,7 +39,7 @@ spec:
mountPath: /host/lib
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
mountPath: /etc/google-fluentd/config.d
# Liveness probe is aimed to help in situations where fluentd
# silently hangs for no apparent reason until manual restart.
# The idea of this probe is that if fluentd is not queueing or
@ -82,12 +74,12 @@ spec:
fi;
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
- --api-override={{ prometheus_to_sd_endpoint }}
- --source=fluentd:http://localhost:31337?whitelisted=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
- --source=fluentd:http://localhost:24231?whitelisted=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
env:
@ -122,4 +114,4 @@ spec:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-gcp-config-v1.2.3
name: fluentd-gcp-config-v1.2.4

View File

@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fluentd-gcp-scaler
namespace: kube-system
labels:
k8s-app: fluentd-gcp-scaler
version: v0.1.0
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-gcp-scaler
template:
metadata:
labels:
k8s-app: fluentd-gcp-scaler
spec:
serviceAccountName: fluentd-gcp-scaler
containers:
- name: fluentd-gcp-scaler
image: gcr.io/google-containers/fluentd-gcp-scaler:0.1
command:
- /scaler.sh
- --ds-name=fluentd-gcp-v3.0.0
- --scaling-policy=fluentd-gcp-scaling-policy
env:
# Defaults, used if no overrides are found in fluentd-gcp-scaling-policy
- name: CPU_REQUEST
value: 100m
- name: MEMORY_REQUEST
value: 200Mi
- name: MEMORY_LIMIT
value: 300Mi

View File

@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: scalingpolicies.scalingpolicy.kope.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: scalingpolicy.kope.io
version: v1alpha1
names:
kind: ScalingPolicy
plural: scalingpolicies
scope: Namespaced

View File

@ -0,0 +1,48 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd-gcp-scaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: system:fluentd-gcp-scaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- "extensions"
resources:
- daemonsets
verbs:
- get
- patch
- apiGroups:
- "scalingpolicy.kope.io"
resources:
- scalingpolicies
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fluentd-gcp-scaler-binding
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system:fluentd-gcp-scaler
subjects:
- kind: ServiceAccount
name: fluentd-gcp-scaler
namespace: kube-system

View File

@ -24,11 +24,12 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: ip-masq-agent
hostNetwork: true
containers:
- name: ip-masq-agent
image: gcr.io/google-containers/ip-masq-agent-amd64:v2.0.2
image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2
resources:
requests:
cpu: 10m

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,34 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metadata-agent
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
- "apps"
- "extensions"
resources:
- "*"
verbs:
- watch
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metadata-agent
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metadata-agent
subjects:
- kind: ServiceAccount
name: metadata-agent
namespace: kube-system

View File

@ -1,23 +1,33 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: metadata-agent
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
labels:
app: stackdriver-agents
app: metadata-agent
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: stackdriver-agents
name: metadata-agent
namespace: kube-system
spec:
selector:
matchLabels:
app: stackdriver-agents
app: metadata-agent
template:
metadata:
labels:
app: stackdriver-agents
app: metadata-agent
spec:
serviceAccountName: metadata-agent
containers:
- image: us.gcr.io/container-monitoring-storage/stackdriver-metadata-agent:{{ metadata_agent_version }}
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:{{ metadata_agent_version }}
imagePullPolicy: IfNotPresent
name: metadata-agent
ports:

View File

@ -33,24 +33,34 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: metadata-proxy
hostNetwork: true
dnsPolicy: Default
containers:
- name: metadata-proxy
image: gcr.io/google_containers/metadata-proxy:v0.1.5
image: k8s.gcr.io/metadata-proxy:v0.1.9
securityContext:
privileged: true
# Request and limit resources to get guaranteed QoS.
resources:
requests:
memory: "32Mi"
memory: "25Mi"
cpu: "30m"
limits:
memory: "32Mi"
memory: "25Mi"
cpu: "30m"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: gcr.io/google_containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
# Request and limit resources to get guaranteed QoS.
resources:
requests:
memory: "20Mi"
cpu: "2m"
limits:
memory: "20Mi"
cpu: "2m"
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons

View File

@ -1,6 +1,8 @@
approvers:
- DirectXMan12
- kawych
- piosz
reviewers:
- DirectXMan12
- kawych
- piosz

View File

@ -23,31 +23,32 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server-v0.2.0
name: metrics-server-v0.2.1
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.2.0
version: v0.2.1
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.2.0
version: v0.2.1
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.2.0
version: v0.2.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: gcr.io/google_containers/metrics-server-amd64:v0.2.0
image: k8s.gcr.io/metrics-server-amd64:v0.2.1
command:
- /metrics-server
- --source=kubernetes.summary_api:''
@ -56,7 +57,7 @@ spec:
name: https
protocol: TCP
- name: metrics-server-nanny
image: gcr.io/google_containers/addon-resizer:1.8.1
image: k8s.gcr.io/addon-resizer:1.8.1
resources:
limits:
cpu: 100m
@ -81,10 +82,10 @@ spec:
- --config-dir=/etc/config
- --cpu=40m
- --extra-cpu=0.5m
- --memory=140Mi
- --memory=40Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=metrics-server-v0.2.0
- --deployment=metrics-server-v0.2.1
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential

View File

@ -43,7 +43,7 @@ spec:
spec:
containers:
- name: node-problem-detector
image: gcr.io/google_containers/node-problem-detector:v0.4.1
image: k8s.gcr.io/node-problem-detector:v0.4.1
command:
- "/bin/sh"
- "-c"

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGE=gcr.io/google_containers/python
IMAGE=staging-k8s.gcr.io/python
VERSION=v1
.PHONY: build push
@ -21,5 +21,5 @@ build:
docker build --pull -t "$(IMAGE):$(VERSION)" .
push:
gcloud docker -- push "$(IMAGE):$(VERSION)"
docker push "$(IMAGE):$(VERSION)"

View File

@ -1,274 +0,0 @@
# Private Docker Registry in Kubernetes
Kubernetes offers an optional private Docker registry addon, which you can turn
on when you bring up a cluster or install later. This gives you a place to
store truly private Docker images for your cluster.
## How it works
The private registry runs as a `Pod` in your cluster. It does not currently
support SSL or authentication, which triggers Docker's "insecure registry"
logic. To work around this, we run a proxy on each node in the cluster,
exposing a port onto the node (via a hostPort), which Docker accepts as
"secure", since it is accessed by `localhost`.
## Turning it on
Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
`ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs
whether the registry is run or not. To set this flag, you can specify
`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
does not include this flag, the following steps should work. Note that some of
this is cloud-provider specific, so you may have to customize it a bit.
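For clusters that do support the flag, turning it on might look like this (a sketch, assuming a GCE setup):
```console
$ KUBE_ENABLE_CLUSTER_REGISTRY=true ./cluster/kube-up.sh
```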
### Make some storage
The primary job of the registry is to store data. To do that we have to decide
where to store it. For cloud environments that have networked storage, we can
use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
```yaml
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
capacity:
storage: {{ pillar['cluster_registry_disk_size'] }}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: "{{ pillar['cluster_registry_disk_name'] }}"
fsType: "ext4"
{% endif %}
```
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
If, for example, you wanted to use NFS, you would just need to change the
`gcePersistentDisk` block to `nfs`. See
[here](https://kubernetes.io/docs/user-guide/volumes.md) for more details on volumes.
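As a rough sketch (the server name and export path here are hypothetical), the NFS variant of that block would look like:
```yaml
  nfs:
    # hypothetical NFS server and export path
    server: nfs.example.com
    path: /exports/registry
```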
Note that in any case, the storage (in this case the GCE PersistentDisk) must be
created independently - this is not something Kubernetes manages for you (yet).
### I don't want or don't have persistent storage
If you are running in a place that doesn't have networked storage, or if you
just want to kick the tires on this without committing to it, you can easily
adapt the `ReplicationController` specification below to use a simple
`emptyDir` volume instead of a `persistentVolumeClaim`.
## Claim the storage
Now that the Kubernetes cluster knows that some storage exists, you can put a
claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}
```
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other
`PersistentVolumes` in which case those might get bound instead). This claim
gives you the right to use this storage until you release the claim.
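You can check that the claim bound as expected (the `STATUS` column should read `Bound`):
```console
$ kubectl --namespace=kube-system get pvc kube-registry-pvc
```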
## Run the registry
Now we can run a Docker registry:
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry-upstream
version: v0
template:
metadata:
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
limits:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
persistentVolumeClaim:
claimName: kube-registry-pvc
```
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
## Expose the registry in the cluster
Now that we have a registry `Pod` running, we can expose it as a Service:
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry-upstream
ports:
- name: registry
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
## Expose the registry on each node
Now that we have a running `Service`, we need to expose it onto each Kubernetes
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating the following daemonset.
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-registry-proxy
namespace: kube-system
labels:
k8s-app: kube-registry-proxy
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
template:
metadata:
labels:
k8s-app: kube-registry-proxy
kubernetes.io/name: "kube-registry-proxy"
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
containers:
- name: kube-registry-proxy
image: gcr.io/google_containers/kube-registry-proxy:0.4
resources:
limits:
cpu: 100m
memory: 50Mi
env:
- name: REGISTRY_HOST
value: kube-registry.kube-system.svc.cluster.local
- name: REGISTRY_PORT
value: "5000"
ports:
- name: registry
containerPort: 80
hostPort: 5000
```
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
When modifying replication-controller, service, and daemon-set definitions, take
care to ensure _unique_ identifiers for the rc-svc couple and the daemon-set.
Failing to do so will register the localhost proxy daemon-sets with the upstream
service. As a result, they will try to proxy themselves, which will, for obvious
reasons, not work.
This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:
```console
$ curl localhost:5000
404 page not found
```
## Using the registry
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:
```yaml
image: localhost:5000/user/container
```
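For example, a minimal (hypothetical) `Pod` pulling from the cluster registry might look like:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-app   # hypothetical name
spec:
  containers:
  - name: app
    image: localhost:5000/user/container
```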
Before you can use the registry, though, you have to be able to get images into
it. If you are building an image on your Kubernetes `Node`, you can spell out
`localhost:5000` when you build and push. More likely, you are building locally
and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
```console
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
| grep Running | head -1 | cut -f1 -d' ')
$ kubectl port-forward --namespace kube-system $POD 5000:5000 &
```
Now you can build and push images on your local computer as
`localhost:5000/yourname/container` and those images will be available inside
your Kubernetes cluster with the same name.
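For example (the image name is illustrative):
```console
$ docker build -t localhost:5000/yourname/container .
$ docker push localhost:5000/yourname/container
```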
# More Extensions
- [Use GCS as storage backend](gcs/README.md)
- [Enable TLS/SSL](tls/README.md)
- [Enable Authentication](auth/README.md)
## Future improvements
* Allow port-forwarding to a Service rather than a pod (#15180)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/README.md?pixel)]()

View File

@ -1,92 +0,0 @@
# Enable Authentication with Htpasswd for Kube-Registry
The Docker registry supports a few authentication providers. The full list of supported providers can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable authentication with htpasswd for kube-registry.
### Prepare Htpasswd Secret
Please generate your own htpasswd file. The steps below assume the file you generated is named `htpasswd`.
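For example, the file can be generated with Apache's `htpasswd` tool (the registry's htpasswd auth expects bcrypt-hashed entries; the user name and password below are placeholders):
```console
$ htpasswd -Bbn myuser mypassword > htpasswd
```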
Create a secret to hold the htpasswd file:
```console
$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd
```
### Run Registry
Note that this sample rc uses emptyDir as the storage backend for simplicity.
<!-- BEGIN MUNGE: EXAMPLE registry-auth-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: basic_realm
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /auth/htpasswd
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: auth-dir
mountPath: /auth
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: auth-dir
secret:
secretName: registry-auth-secret
```
<!-- END MUNGE: EXAMPLE registry-auth-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).
### To Verify
Set up a proxy or port-forwarding to the kube-registry. Image push/pull should fail without authentication. Then use `docker login` to authenticate with the kube-registry and verify that it works.
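A quick check might look like this (an illustrative sketch, assuming the node proxy on port 5000 and the placeholder user from above):
```console
$ docker login localhost:5000
$ docker tag busybox localhost:5000/myuser/busybox
$ docker push localhost:5000/myuser/busybox
```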
### Configure Nodes to Authenticate with Kube-Registry
By default, nodes assume no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, see the documentation on configuring nodes to authenticate to a private repository [here](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/auth/README.md?pixel)]()

View File

@ -1,56 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: basic_realm
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /auth/htpasswd
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: auth-dir
mountPath: /auth
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: auth-dir
secret:
secretName: registry-auth-secret

View File

@ -1,81 +0,0 @@
# Kube-Registry with GCS storage backend
Besides the local file system, the Docker registry also supports a number of cloud storage backends. The full list of supported backends can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS as the storage backend for kube-registry.
A few preparation steps are needed.
1. Create a bucket named kube-registry in GCS.
1. Create a service account for GCS access and create a key file in JSON format. Detailed instructions can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts).
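For example, the bucket can be created with `gsutil` (assuming your project and credentials are already configured):
```console
$ gsutil mb gs://kube-registry
```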
### Pack Keyfile into a Secret
Assuming you have downloaded the keyfile as `keyfile.json`, create a secret from it:
```console
$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json
```
### Run Registry
<!-- BEGIN MUNGE: EXAMPLE registry-gcs-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE
value: gcs
- name: REGISTRY_STORAGE_GCS_BUCKET
value: kube-registry
- name: REGISTRY_STORAGE_GCS_KEYFILE
value: /gcs/keyfile
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumeMounts:
- name: gcs-key
mountPath: /gcs
volumes:
- name: gcs-key
secret:
secretName: gcs-key-secret
```
<!-- END MUNGE: EXAMPLE registry-gcs-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/gcs/README.md?pixel)]()

View File

@ -1,52 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE
value: gcs
- name: REGISTRY_STORAGE_GCS_BUCKET
value: kube-registry
- name: REGISTRY_STORAGE_GCS_KEYFILE
value: /gcs/keyfile
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumeMounts:
- name: gcs-key
mountPath: /gcs
volumes:
- name: gcs-key
secret:
secretName: gcs-key-secret

View File

@ -1,26 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM nginx:1.11
RUN apt-get update \
&& apt-get install -y \
curl \
--no-install-recommends \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc
COPY rootfs /
CMD ["/bin/boot"]

View File

@ -1,24 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build push vet test clean
TAG = 0.4
REPO = gcr.io/google_containers/kube-registry-proxy
build:
docker build --pull -t $(REPO):$(TAG) .
push:
gcloud docker -- push $(REPO):$(TAG)

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
# fail if no hostname is provided
REGISTRY_HOST=${REGISTRY_HOST:?no host}
REGISTRY_PORT=${REGISTRY_PORT:-5000}
# we are always listening on port 80
# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25
PORT=80
sed -e "s/%HOST%/$REGISTRY_HOST/g" \
-e "s/%PORT%/$REGISTRY_PORT/g" \
-e "s/%BIND_PORT%/$PORT/g" \
</etc/nginx/conf.d/default.conf.in >/etc/nginx/conf.d/default.conf
# wait for registry to come online
while ! curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do
printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT"
sleep 1
done
printf "starting proxy...\n"
exec nginx -g "daemon off;" "$@"

View File

@ -1,28 +0,0 @@
# Docker registry proxy for api version 2
upstream docker-registry {
server %HOST%:%PORT%;
}
# No client auth or TLS
# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS
server {
listen %BIND_PORT%;
server_name localhost;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location / {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
include docker-registry.conf;
}
}

View File

@ -1,6 +0,0 @@
proxy_pass http://docker-registry;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;

View File

@ -1,26 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}

View File

@ -1,17 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
capacity:
storage: {{ pillar['cluster_registry_disk_size'] }}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: "{{ pillar['cluster_registry_disk_name'] }}"
fsType: "ext4"
{% endif %}

View File

@ -1,14 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}

View File

@ -1,49 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
k8s-app: kube-registry-upstream
version: v0
template:
metadata:
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2.5.1
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
persistentVolumeClaim:
claimName: kube-registry-pvc

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry-upstream
ports:
- name: registry
port: 5000
protocol: TCP

View File

@ -1,116 +0,0 @@
# Enable TLS for Kube-Registry
This document describes how to enable TLS for kube-registry. Before you start, please check that you have all the prerequisites:
- A domain for kube-registry. Assuming it is `myregistrydomain.com`.
- A domain certificate and key. Assuming they are `domain.crt` and `domain.key`.
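If you only need a certificate for testing, a self-signed pair can be generated with `openssl` (a sketch; the CN matches the domain assumed above):
```console
$ openssl req -x509 -newkey rsa:4096 -nodes -sha256 -days 365 \
    -keyout domain.key -out domain.crt -subj "/CN=myregistrydomain.com"
```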
### Pack domain.crt and domain.key into a Secret
```console
$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key
```
### Run Registry
Note that this sample rc uses emptyDir as the storage backend for simplicity.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: /certs/domain.crt
- name: REGISTRY_HTTP_TLS_KEY
value: /certs/domain.key
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: cert-dir
mountPath: /certs
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: cert-dir
secret:
secretName: registry-tls-secret
```
<!-- END MUNGE: EXAMPLE registry-tls-rc.yaml -->
### Expose External IP for Kube-Registry
Modify the default kube-registry service to the `LoadBalancer` type and point the DNS record of `myregistrydomain.com` to the service's external IP.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
# kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
type: LoadBalancer
ports:
- name: registry
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-tls-svc.yaml -->
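Once the service has been updated, its external IP appears in the `EXTERNAL-IP` column of:
```console
$ kubectl --namespace=kube-system get service kube-registry
```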
### To Verify
Now you should be able to access your kube-registry from another Docker host.
```console
docker pull busybox
docker tag busybox myregistrydomain.com:5000/busybox
docker push myregistrydomain.com:5000/busybox
docker pull myregistrydomain.com:5000/busybox
```
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/tls/README.md?pixel)]()

View File

@ -1,57 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: /certs/domain.crt
- name: REGISTRY_HTTP_TLS_KEY
value: /certs/domain.key
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: cert-dir
mountPath: /certs
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: cert-dir
secret:
secretName: registry-tls-secret

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
# kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
type: LoadBalancer
ports:
- name: registry
port: 5000
protocol: TCP

View File

@ -1,26 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
echo -e "${color_red}WARNING${color_norm}: The bash deployment for AWS is obsolete. The" >&2
echo -e "v1.5.x releases are the last to support cluster/kube-up.sh with AWS." >&2
echo "For a list of viable alternatives, see:" >&2
echo >&2
echo " http://kubernetes.io/docs/getting-started-guides/aws/" >&2
echo >&2
exit 1

vendor/k8s.io/kubernetes/cluster/centos/OWNERS generated vendored Normal file
View File

@ -0,0 +1,2 @@
reviewers:
- zouyee

View File

@ -118,9 +118,13 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE
export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}
# Admission Controllers to invoke prior to persisting objects in cluster
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,PVCProtection,ResourceQuota"}
# Admission Controllers to invoke prior to persisting objects in cluster.
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
# If we included ResourceQuota, we should keep it at the end of the list to
# prevent incrementing quota usage prematurely.
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
# Extra options to set on the Docker command line.
# This is useful for setting --insecure-registry for local registries.

View File

@ -33,29 +33,6 @@ mkdir -p "$cert_dir"
use_cn=false
# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
# If there's no public IP assigned (e.g. this host is running on an internal subnet in a VPC), then
# curl will happily spit out the contents of AWS's 404 page and an exit code of zero.
#
# The string containing the 404 page trips up one of easyrsa's calls to openssl later; whichever
# one creates the CA certificate, because the 404 page is > 64 characters.
if cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4); then
:
else
cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
use_cn=true
fi
sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
sans="${sans},${extra_sans}"

View File

@ -234,7 +234,7 @@ echo "[INFO] tear-down-node on $1"
# Generate the CA certificates for k8s components
function make-ca-cert() {
echo "[INFO] make-ca-cert"
bash "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
bash "${ROOT}/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
}
# Provision master

View File

@ -84,9 +84,14 @@ function get_bin() {
"${KUBE_ROOT}/_output/bin/${bin}"
"${KUBE_ROOT}/_output/dockerized/bin/${host_os}/${host_arch}/${bin}"
"${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}/${bin}"
"${KUBE_ROOT}/bazel-bin/${srcdir}/${bin}"
"${KUBE_ROOT}/platforms/${host_os}/${host_arch}/${bin}"
)
# Also search for binary in bazel build tree.
# The bazel go rules place binaries in subtrees like
# "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
# the platform name is matched in the path.
locations+=($(find "${KUBE_ROOT}/bazel-bin/${srcdir}" -type f -executable \
-path "*/${host_os}_${host_arch}*/${bin}" 2>/dev/null || true) )
echo $( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
}

File diff suppressed because it is too large

View File

@ -1,26 +1,17 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
pkg_tar(
name = "gci-trusty-manifests",
files = [
"container-linux/configure-helper.sh",
"gci/configure-helper.sh",
"gci/health-monitor.sh",
"//cluster/gce/gci/mounter",
],
files = {
"//cluster/gce/gci/mounter": "gci-mounter",
"gci/configure-helper.sh": "gci-configure-helper.sh",
"gci/health-monitor.sh": "health-monitor.sh",
},
mode = "0755",
strip_prefix = ".",
# pkg_tar doesn't support renaming the files we add, so instead create symlinks.
symlinks = {
"container-linux-configure-helper.sh": "container-linux/configure-helper.sh",
"gci-configure-helper.sh": "gci/configure-helper.sh",
"health-monitor.sh": "gci/health-monitor.sh",
"gci-mounter": "gci/mounter/mounter",
"trusty-configure-helper.sh": "trusty/configure-helper.sh",
},
)
filegroup(
@ -40,15 +31,32 @@ filegroup(
tags = ["automanaged"],
)
# Having the configure-vm.sh script and and trusty code from the GCE cluster
# deploy hosted with the release is useful for GKE.
# This list should match the list in kubernetes/release/lib/releaselib.sh.
# Having the COS code from the GCE cluster deploy hosted with the release is
# useful for GKE. This list should match the list in
# kubernetes/release/lib/releaselib.sh.
release_filegroup(
name = "gcs-release-artifacts",
srcs = [
"configure-vm.sh",
"gci/configure.sh",
"gci/master.yaml",
"gci/node.yaml",
],
)
pkg_tar(
name = "gce-master-manifests",
srcs = [
"manifests/abac-authz-policy.jsonl",
"manifests/cluster-autoscaler.manifest",
"manifests/e2e-image-puller.manifest",
"manifests/etcd.manifest",
"manifests/glbc.manifest",
"manifests/kube-addon-manager.yaml",
"manifests/kube-apiserver.manifest",
"manifests/kube-controller-manager.manifest",
"manifests/kube-proxy.manifest",
"manifests/kube-scheduler.manifest",
"manifests/rescheduler.manifest",
],
mode = "0644",
)

View File

@ -3,8 +3,12 @@ reviewers:
- gmarek
- jszczepkowski
- vishh
- mwielgus
- MaciekPytel
approvers:
- bowei
- gmarek
- jszczepkowski
- vishh
- mwielgus
- MaciekPytel

View File

@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
filegroup(
name = "addon-srcs",
@ -16,10 +16,10 @@ filegroup(
pkg_tar(
name = "addons",
extension = "tar.gz",
files = [
srcs = [
":addon-srcs",
],
extension = "tar.gz",
mode = "0644",
strip_prefix = ".",
)

View File

@ -0,0 +1,30 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
namespace: kube-system

View File

@ -0,0 +1,35 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- patch
- update
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update

View File

@ -3,7 +3,7 @@ kind: PodSecurityPolicy
metadata:
name: gce.unprivileged-addon
annotations:
kubernetes.io/description: 'This policy grants the minimum ammount of
kubernetes.io/description: 'This policy grants the minimum amount of
privilege necessary to run non-privileged kube-system pods. This policy is
not intended for use outside of kube-system, and may include further
restrictions in the future.'

View File

@ -98,4 +98,6 @@ function get-cluster-ip-range {
echo "${suggested_range}"
}
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"

View File

@ -39,7 +39,7 @@ NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by seperating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
@ -54,12 +54,6 @@ CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
@ -80,7 +74,7 @@ fi
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
@ -88,13 +82,16 @@ NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# MASTER_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# KUBELET_TEST_ARGS are extra arguments passed to kubelet.
KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using 'default' network.
@ -121,11 +118,16 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
REMOUNT_VOLUME_PLUGIN_DIR="${REMOUNT_VOLUME_PLUGIN_DIR:-true}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
@ -159,7 +161,7 @@ ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.13-5-watch}"
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.16-1}"
# One special node out of NUM_NODES would be created of this type if specified.
# Useful for scheduling heapster in large clusters with nodes of small size.
@ -190,7 +192,7 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
@ -229,12 +231,6 @@ DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_PD:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
@ -271,14 +267,19 @@ ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. Currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
fi
# Enable GCE Alpha features.
@ -297,12 +298,17 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,PVCProtection
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook"
# ResourceQuota must come last, or a creation is recorded, but the pod was forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
@ -313,10 +319,7 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
@ -340,10 +343,6 @@ ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
@ -363,9 +362,10 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
@ -374,7 +374,7 @@ HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
@ -400,3 +400,9 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi

View File

@ -37,6 +37,11 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
@ -48,13 +53,6 @@ CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
@ -74,7 +72,7 @@ fi
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
@ -82,7 +80,8 @@ NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
@ -147,11 +146,21 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# TODO(piosz) remove this option once Metrics Server became a stable thing.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to set up as part of the cluster bring-up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# Metadata agent is a daemon set that provides metadata of kubernetes objects
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.16-1}"
# One special node out of NUM_NODES would be created of this type if specified.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.14) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@ -166,15 +175,8 @@ CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CL
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
# TODO: change this and flex e2e test when default flex volume install path is changed for GCI
# Set flex dir to one that's readable from controller-manager container and writable by the flex e2e test.
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR="--flex-volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
# Set flex dir to one that's readable from kubelet and writable by the flex e2e test.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || ([[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] && [[ "${REGISTER_MASTER_KUBELET}" == "false" ]]); then
KUBELET_TEST_VOLUME_PLUGIN_DIR="--volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
REMOUNT_VOLUME_PLUGIN_DIR="${REMOUNT_VOLUME_PLUGIN_DIR:-true}"
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
@ -183,7 +185,7 @@ TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBELET_TEST_VOLUME_PLUGIN_DIR:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
@ -191,7 +193,7 @@ if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}"
MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR:-}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
@ -218,7 +220,7 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
@ -251,12 +253,6 @@ DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
@ -293,14 +289,19 @@ ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [[ "${ENABLE_IP_ALIASES}" == "true" ]]; then
  # Size of ranges allocated to each node. gcloud currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
fi
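# For example (hypothetical invocation), to create a cluster with IP aliases
# and cloud-based node IPAM:
#   KUBE_GCE_ENABLE_IP_ALIASES=true KUBE_GCE_NODE_IPAM_MODE=CloudAllocator cluster/kube-up.sh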
# Enable GCE Alpha features.
@ -319,7 +320,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority"
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
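  # For example (hypothetical trimmed list), the default admission control list
  # above can be replaced wholesale by exporting KUBE_ADMISSION_CONTROL:
  #   KUBE_ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ServiceAccount" cluster/kube-up.sh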
@ -343,15 +344,13 @@ STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
@ -376,10 +375,6 @@ ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Upgrade test jobs that go from a version < 1.6 to a version >= 1.6 should override this to be true.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into the CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-true}" # true, false
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
@ -397,9 +392,10 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
@ -408,7 +404,7 @@ HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
@ -419,6 +415,9 @@ ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-true}"
# Optional: [Experimental] Run kube-proxy as a DaemonSet if set to true; run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: Change the kube-proxy implementation. Choices are [iptables, ipvs].
KUBE_PROXY_MODE="${KUBE_PROXY_MODE:-iptables}"
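# For example (hypothetical invocation), to run kube-proxy in IPVS mode:
#   KUBE_PROXY_MODE=ipvs cluster/kube-up.sh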
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
@ -434,3 +433,9 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi
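# For example (hypothetical invocation), to exercise the TokenRequest feature:
#   ENABLE_TOKENREQUEST=true cluster/kube-up.sh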

@ -1,899 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# If we have any arguments at all, this is a push and not just setup.
is_push=$@
function ensure-basic-networking() {
# Deal with GCE networking bring-up race. (We rely on DNS for a lot,
# and it's just not worth doing a whole lot of startup work if this
# isn't ready yet.)
until getent hosts metadata.google.internal &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...'
sleep 3
done
until getent hosts $(hostname -f || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own FQDN)...'
sleep 3
done
until getent hosts $(hostname -i || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own IP)...'
sleep 3
done
echo "Networking functional on $(hostname) ($(hostname -i))"
}
# A hookpoint for installing any needed packages
ensure-packages() {
:
}
function create-node-pki {
echo "Creating node pki files"
local -r pki_dir="/etc/kubernetes/pki"
mkdir -p "${pki_dir}"
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
CA_CERT_BUNDLE="${CA_CERT}"
fi
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}"
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}"
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}"
fi
}
# A hookpoint for setting up local devices
ensure-local-disks() {
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "$ssd" ]; then
      ssdnum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum"
mkdir -p /mnt/disks/ssd$ssdnum
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \
{ echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; }
else
echo "No local SSD disks found."
fi
done
}
function config-ip-firewall {
echo "Configuring IP firewall rules"
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
echo "Add rule for metadata concealment"
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
fi
}
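# To verify the rule on a node afterwards (hypothetical check):
#   iptables -w -t nat -L PREROUTING | grep metadata-concealment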
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
function salt-apiserver-timeout-grain() {
cat <<EOF >>/etc/salt/minion.d/grains.conf
minRequestTimeout: '$1'
EOF
}
function set-broken-motd() {
echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}
function reset-motd() {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
gitref="${version//*+/}"
fi
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/usr/local/share/doc/kubernetes/LICENSES
EOF
}
function curl-metadata() {
curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
}
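# Example usage (as in set-kube-env below), fetching the kube-env attribute:
#   kube_env="$(curl-metadata kube-env)"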
function set-kube-env() {
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
until curl-metadata kube-env > "${kube_env_yaml}"; do
echo 'Waiting for kube-env...'
sleep 3
done
# kube-env has all the environment variables we care about, in a flat yaml format
eval "$(python -c '
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
print("""export {var}""".format(var = k))
' < """${kube_env_yaml}""")"
}
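# For illustration, a kube-env entry such as:
#   ENABLE_METRICS_SERVER: 'true'
# is evaluated by the snippet above as:
#   readonly ENABLE_METRICS_SERVER=true
#   export ENABLE_METRICS_SERVER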
function remove-docker-artifacts() {
echo "== Deleting docker0 =="
apt-get-install bridge-utils
# Remove docker artifacts on minion nodes, if present
iptables -t nat -F || true
ifconfig docker0 down || true
brctl delbr docker0 || true
echo "== Finished deleting docker0 =="
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the file at the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
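# Example usage (hypothetical URL; pass "" when the sha1 is unknown):
#   download-or-bust "" "https://storage.googleapis.com/kubernetes-release/release/v1.9.3/kubernetes-server-linux-amd64.tar.gz"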
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
apt-get-install() {
local -r packages=( $@ )
installed=true
for package in "${packages[@]}"; do
if ! dpkg -s "${package}" &>/dev/null; then
installed=false
break
fi
done
if [[ "${installed}" == "true" ]]; then
echo "== ${packages[@]} already installed, skipped apt-get install ${packages[@]} =="
return
fi
apt-get-update
# Forcibly install packages (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do
echo "== install of packages $@ failed, retrying =="
sleep 5
done
}
apt-get-update() {
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
}
# Restart any services that need restarting due to a library upgrade
# Uses needrestart
restart-updated-services() {
# We default to restarting services, because this is only done as part of an update
if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then
echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}"
return
fi
echo "Restarting services with updated libraries (needrestart -r a)"
# The pipes make sure that needrestart doesn't think it is running with a TTY
# Debian bug #803249; fixed but not necessarily in package repos yet
echo "" | needrestart -r a 2>&1 | tee /dev/null
}
# Reboot the machine if /var/run/reboot-required exists
reboot-if-required() {
if [[ ! -e "/var/run/reboot-required" ]]; then
return
fi
echo "Reboot is required (/var/run/reboot-required detected)"
if [[ -e "/var/run/reboot-required.pkgs" ]]; then
echo "Packages that triggered reboot:"
cat /var/run/reboot-required.pkgs
fi
# We default to rebooting the machine because this is only done as part of an update
if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then
echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}"
return
fi
rm -f /var/run/reboot-required
rm -f /var/run/reboot-required.pkgs
echo "Triggering reboot"
init 6
}
# Install upgrades using unattended-upgrades, then reboot or restart services
auto-upgrade() {
# We default to not installing upgrades
if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then
echo "AUTO_UPGRADE not set to true; won't auto-upgrade"
return
fi
apt-get-install unattended-upgrades needrestart
unattended-upgrade --debug
reboot-if-required # We may reboot the machine right here
restart-updated-services
}
#
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
if dpkg -s salt-minion &>/dev/null; then
echo "== SaltStack already installed, skipping install step =="
return
fi
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
DEBS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
)
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for deb in "${DEBS[@]}"; do
if [ ! -e "${deb}" ]; then
download-or-bust "" "${URL_BASE}/${deb}"
fi
done
# Based on
# https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
# We do this to prevent Salt from starting the salt-minion
# daemon. The other packages don't have relevant daemons. (If you
# add a package that needs a daemon started, add it to a different
# list.)
cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
echo "Salt shall not start." >&2
exit 101
EOF
chmod 0755 /usr/sbin/policy-rc.d
for deb in "${DEBS[@]}"; do
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
dpkg --skip-same-version --force-depends -i "${deb}"
done
# This will install any of the unmet dependencies from above.
echo "== Installing unmet dependencies =="
until apt-get install -f -y; do
echo "== apt-get install failed, retrying =="
sleep 5
done
rm /usr/sbin/policy-rc.d
# Log a timestamp
echo "== Finished installing Salt =="
}
# Ensure salt-minion isn't running and never runs
stop-salt-minion() {
if [[ -e /etc/init/salt-minion.override ]]; then
# Assume this has already run (upgrade, or baked into containervm)
return
fi
# This ensures it on next reboot
echo manual > /etc/init/salt-minion.override
update-rc.d salt-minion disable
while service salt-minion status >/dev/null; do
echo "salt-minion found running, stopping"
service salt-minion stop
sleep 1
done
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
find-master-pd() {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
function create-salt-pillar() {
# Always overwrite the cluster-params.sls (even on a push, we have
# these variables)
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")'
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")'
enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")'
enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")'
enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")'
enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
cluster_dns_core_dns: '$(echo "$CLUSTER_DNS_CORE_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")'
hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")'
softlockup_panic: '$(echo "$SOFTLOCKUP_PANIC" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")'
enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")'
initial_etcd_cluster_state: '$(echo "${INITIAL_ETCD_CLUSTER_STATE:-}" | sed -e "s/'/''/g")'
ca_cert_bundle_path: '$(echo "${CA_CERT_BUNDLE_PATH:-}" | sed -e "s/'/''/g")'
hostname: '$(echo "${ETCD_HOSTNAME:-$(hostname -s)}" | sed -e "s/'/''/g")'
enable_pod_priority: '$(echo "${ENABLE_POD_PRIORITY:-}" | sed -e "s/'/''/g")'
enable_default_storage_class: '$(echo "$ENABLE_DEFAULT_STORAGE_CLASS" | sed -e "s/'/''/g")'
kube_proxy_daemonset: '$(echo "$KUBE_PROXY_DAEMONSET" | sed -e "s/'/''/g")'
EOF
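  # For illustration, with INSTANCE_PREFIX=kubernetes (a hypothetical value),
  # the first rendered pillar line above becomes:
  #   instance_prefix: 'kubernetes'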
if [ -n "${STORAGE_BACKEND:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
admission-control-config-file: /etc/admission_controller.config
EOF
fi
if [ -n "${KUBELET_PORT:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_IMAGE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_tag: '$(echo "$ETCD_IMAGE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_repository: '$(echo "$ETCD_DOCKER_REPOSITORY" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_VERSION:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_version: '$(echo "$ETCD_VERSION" | sed -e "s/'/''/g")'
EOF
fi
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_over_ssl: 'true'
EOF
else
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_over_ssl: 'false'
EOF
fi
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_quorum_read: '$(echo "${ETCD_QUORUM_READ}" | sed -e "s/'/''/g")'
EOF
fi
# Configuration changes for test clusters
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
# TODO: Replace this with a persistent volume (and create it).
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
cluster_registry_disk_type: gce
cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g")
cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g")
EOF
fi
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NODE_LABELS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
non_master_node_labels: '$(echo "${NON_MASTER_NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NODE_TAINTS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_taints: '$(echo "${NODE_TAINTS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${EVICTION_HARD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
fi
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")'
autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")'
autoscaler_expander_config: '$(echo "${AUTOSCALER_EXPANDER_CONFIG}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_IP_ALIASES:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")'
EOF
fi
}
# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
# 1. Convert from [0-9]+X?i?B into [0-9]X? (X denotes the prefix, ? means the field
# is optional.)
# 2. Attach an 'i' to the end of the string if we find a letter.
# The two-step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
function convert-bytes-gce-kube() {
local -r storage_space=$1
echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}
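# Examples (derived from the sed expressions above):
#   convert-bytes-gce-kube 200GB   # -> 200Gi
#   convert-bytes-gce-kube 1KiB    # -> 1Ki
#   convert-bytes-gce-kube 5B      # -> 5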
# This should happen both on cluster initialization and node upgrades.
#
# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
# connect to the apiserver.
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: ${KUBELET_CERT_PATH}
client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
cluster:
server: https://${KUBERNETES_MASTER_NAME}
certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
)
fi
}
# This should happen both on cluster initialization and node upgrades.
#
# - When run as static pods, use the CA_CERT and KUBE_PROXY_TOKEN to generate a
# kubeconfig file for the kube-proxy to securely connect to the apiserver.
function create-salt-kubeproxy-auth() {
local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-proxy
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
)
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
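# Example: split-commas "a,b,c" prints "a", "b" and "c" on separate lines.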
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar (${server_binary_tar_urls[@]})"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
local -r salt_tar_urls=( $(split-commas "${SALT_TAR_URL}") )
local -r salt_tar="${salt_tar_urls[0]##*/}"
if [[ -n "${SALT_TAR_HASH:-}" ]]; then
local -r salt_tar_hash="${SALT_TAR_HASH}"
else
echo "Downloading Salt tar sha1 (not found in env)"
download-or-bust "" "${salt_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r salt_tar_hash=$(cat "${salt_tar}.sha1")
fi
echo "Downloading Salt tar (${salt_tar_urls[@]})"
download-or-bust "${salt_tar_hash}" "${salt_tar_urls[@]}"
echo "Unpacking Salt tree and checking integrity of binary release tar"
rm -rf kubernetes
tar xzf "${salt_tar}" && tar tzf "${server_binary_tar}" > /dev/null
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running release install script"
kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
}
function fix-apt-sources() {
sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
}
function salt-run-local() {
cat <<EOF >/etc/salt/minion.d/local.conf
file_client: local
file_roots:
base:
- /srv/salt
EOF
}
function salt-debug-log() {
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
}
function salt-node-role() {
local -r kubelet_bootstrap_kubeconfig="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
local -r kubelet_kubeconfig="/srv/salt-overlay/salt/kubelet/kubeconfig"
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
cloud: gce
api_servers: '${KUBERNETES_MASTER_NAME}'
kubelet_bootstrap_kubeconfig: /var/lib/kubelet/bootstrap-kubeconfig
kubelet_kubeconfig: /var/lib/kubelet/kubeconfig
EOF
}
function env-to-grains {
local key=$1
  local env_key=$(echo "${key}" | tr '[:lower:]' '[:upper:]')
local value=${!env_key:-}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
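# For example, with DOCKER_OPTS="--log-level=warn" in the environment (a
# hypothetical value), `env-to-grains "docker_opts"` appends the grain:
#   docker_opts: '--log-level=warn'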
function node-docker-opts() {
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}"
fi
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
fi
}
function salt-grains() {
env-to-grains "docker_opts"
env-to-grains "docker_root"
env-to-grains "kubelet_root"
env-to-grains "feature_gates"
}
function configure-salt() {
mkdir -p /etc/salt/minion.d
salt-run-local
salt-node-role
node-docker-opts
salt-grains
install-salt
stop-salt-minion
}
function run-salt() {
echo "== Calling Salt =="
local rc=0
for i in {0..6}; do
salt-call --retcode-passthrough --local state.highstate && rc=0 || rc=$?
if [[ "${rc}" == 0 ]]; then
return 0
fi
done
echo "Salt failed to run repeatedly" >&2
return "${rc}"
}
function run-user-script() {
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
fi
  if [[ -n "${user_script:-}" ]]; then
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
echo "== running user startup script =="
"${INSTALL_DIR}/k8s-user-script.sh"
fi
}
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
echo "Support for debian master has been removed"
exit 1
fi
if [[ -z "${is_push}" ]]; then
echo "== kube-up node config starting =="
set-broken-motd
ensure-basic-networking
fix-apt-sources
ensure-install-dir
ensure-packages
set-kube-env
auto-upgrade
ensure-local-disks
create-node-pki
create-salt-pillar
create-salt-kubelet-auth
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-salt-kubeproxy-auth
fi
download-release
configure-salt
remove-docker-artifacts
config-ip-firewall
run-salt
reset-motd
run-user-script
echo "== kube-up node config done =="
else
echo "== kube-push node config starting =="
ensure-basic-networking
ensure-install-dir
set-kube-env
create-salt-pillar
download-release
reset-motd
run-salt
echo "== kube-push node config done =="
fi

@ -1,8 +0,0 @@
approvers:
- euank
- yifan-gu
- ethernetdan
reviewers:
- euank
- yifan-gu
- ethernetdan

@ -1,8 +0,0 @@
# Container Linux image
The [Container Linux Operating System](https://coreos.com/why/) is a Linux distribution optimized for running containers securely at scale.
CoreOS provides [a Container Linux image](https://coreos.com/os/docs/latest/booting-on-google-compute-engine.html) for Google Cloud Platform (GCP).
This folder contains configuration and tooling to allow kube-up to create a Kubernetes cluster on Google Cloud Platform running on the official Container Linux image.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/gce/container-linux/README.md?pixel)]()
