Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@ -15,6 +15,7 @@ filegroup(
":package-srcs",
"//cluster/addons:all-srcs",
"//cluster/gce:all-srcs",
"//cluster/images/conformance:all-srcs",
"//cluster/images/etcd-version-monitor:all-srcs",
"//cluster/images/etcd/migrate:all-srcs",
"//cluster/images/hyperkube:all-srcs",


@ -3,6 +3,7 @@ reviewers:
- jbeda
- mikedanese
- roberthbailey
- spiffxp
- zmerlynn
approvers:
- eparis
@ -10,3 +11,5 @@ approvers:
- mikedanese
- roberthbailey
- zmerlynn
labels:
- sig/cluster-lifecycle


@ -1,14 +1,12 @@
# Cluster Configuration
##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Please submit new automation deployments to [kube-deploy](https://github.com/kubernetes/kube-deploy). Deployments in this directory will continue to be maintained and supported at their current level of support.
##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Deployments in this directory will continue to be maintained and supported at their current level of support.
The scripts and data in this directory automate creation and configuration of a Kubernetes cluster, including networking, DNS, nodes, and master components.
The scripts and data in this directory automate creation and configuration of a Kubernetes cluster, including networking, DNS, nodes, and control plane components.
See the [getting-started guides](https://kubernetes.io/docs/getting-started-guides) for examples of how to use the scripts.
*cloudprovider*/`config-default.sh` contains a set of tweakable definitions/parameters for the cluster.
The heavy lifting of configuring the VMs is done by [SaltStack](http://www.saltstack.com/).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/README.md?pixel)]()


@ -1,3 +1,14 @@
### Version 8.9 (Fri October 19 2018 Jeff Grafton <jgrafton@google.com>)
- Update to use debian-base:0.4.0.
- Update kubectl to v1.11.3.
### Version 8.8 (Mon October 1 2018 Zihong Zheng <zihongz@google.com>)
- Update to use debian-base:0.3.2.
### Version 8.7 (Tue September 4 2018 Zihong Zheng <zihongz@google.com>)
- Support extra `--prune-whitelist` resources in kube-addon-manager.
- Update kubectl to v1.10.7.
### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
- Allow reconcile/ensure loop to work with resource under non-kube-system namespace.
- Update kubectl to v1.9.3.


@ -14,6 +14,8 @@
FROM BASEIMAGE
RUN clean-install bash
ADD kube-addons.sh /opt/
ADD namespace.yaml /opt/
ADD kubectl /usr/local/bin/


@ -15,24 +15,10 @@
IMAGE=staging-k8s.gcr.io/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.6
KUBECTL_VERSION?=v1.9.3
VERSION=v8.9
KUBECTL_VERSION?=v1.11.3
ifeq ($(ARCH),amd64)
BASEIMAGE?=bashell/alpine-bash
endif
ifeq ($(ARCH),arm)
BASEIMAGE?=arm32v7/debian
endif
ifeq ($(ARCH),arm64)
BASEIMAGE?=arm64v8/debian
endif
ifeq ($(ARCH),ppc64le)
BASEIMAGE?=ppc64le/debian
endif
ifeq ($(ARCH),s390x)
BASEIMAGE?=s390x/debian
endif
BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.0
.PHONY: build push


@ -28,6 +28,29 @@
KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
KUBECTL_OPTS=${KUBECTL_OPTS:-}
# KUBECTL_PRUNE_WHITELIST is a list of resources whitelisted by
# default.
# This is currently the same with the default in:
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go
KUBECTL_PRUNE_WHITELIST=(
core/v1/ConfigMap
core/v1/Endpoints
core/v1/Namespace
core/v1/PersistentVolumeClaim
core/v1/PersistentVolume
core/v1/Pod
core/v1/ReplicationController
core/v1/Secret
core/v1/Service
batch/v1/Job
batch/v1beta1/CronJob
extensions/v1beta1/DaemonSet
extensions/v1beta1/Deployment
extensions/v1beta1/Ingress
extensions/v1beta1/ReplicaSet
apps/v1beta1/StatefulSet
apps/v1beta1/Deployment
)
ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
@ -82,6 +105,25 @@ function log() {
esac
}
# Generate kubectl prune-whitelist flags from provided resource list.
function generate_prune_whitelist_flags() {
local -r resources=($@)
for resource in "${resources[@]}"; do
printf "%s" "--prune-whitelist ${resource} "
done
}
# KUBECTL_EXTRA_PRUNE_WHITELIST is a list of extra whitelisted resources
# besides the default ones.
extra_prune_whitelist=
if [ -n "${KUBECTL_EXTRA_PRUNE_WHITELIST:-}" ]; then
extra_prune_whitelist=( ${KUBECTL_EXTRA_PRUNE_WHITELIST:-} )
fi
prune_whitelist=( ${KUBECTL_PRUNE_WHITELIST[@]} ${extra_prune_whitelist[@]} )
prune_whitelist_flags=$(generate_prune_whitelist_flags ${prune_whitelist[@]})
log INFO "== Generated kubectl prune whitelist flags: $prune_whitelist_flags =="
# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
@ -126,12 +168,12 @@ function reconcile_addons() {
log INFO "== Reconciling with deprecated label =="
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
--prune=true --recursive | grep -v configured
--prune=true ${prune_whitelist_flags} --recursive | grep -v configured
log INFO "== Reconciling with addon-manager label =="
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
--prune=true --recursive | grep -v configured
--prune=true ${prune_whitelist_flags} --recursive | grep -v configured
log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
}
@ -166,7 +208,7 @@ function is_leader() {
# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result is of that. Start everything below that directory.
# managed result of that. Start everything below that directory.
log INFO "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="
# Create the namespace that will be used to host the cluster-level add-ons.


@ -10,6 +10,7 @@ rules:
- apiGroups: [""]
resources:
- namespaces
- serviceaccounts
verbs:
- get
- list


@ -17,19 +17,22 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
name: autoscaler
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=calico-typha-horizontal-autoscaler
- --target=deployment/calico-typha
- --logtostderr=true
- --v=2
resources:
requests:
cpu: 10m
limits:
cpu: 10m
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
name: autoscaler
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=calico-typha-horizontal-autoscaler
- --target=deployment/calico-typha
- --logtostderr=true
- --v=2
resources:
requests:
cpu: 10m
limits:
cpu: 10m
serviceAccountName: typha-cpha


@ -1,6 +1,8 @@
approvers:
- bowei
- nicksardo
- rramkumar1
- mrhohn
reviewers:
- bowei
- nicksardo
- rramkumar1
- mrhohn


@ -25,7 +25,7 @@ spec:
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: k8s.gcr.io/defaultbackend:1.4
image: k8s.gcr.io/defaultbackend-amd64:1.5
livenessProbe:
httpGet:
path: /healthz


@ -36,31 +36,34 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@ -73,13 +76,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.4
name: heapster-nanny
resources:
limits:
@ -108,11 +111,14 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: k8s.gcr.io/addon-resizer:1.8.1
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.4
name: eventer-nanny
resources:
limits:
@ -141,7 +147,7 @@ spec:
- --memory={{base_eventer_memory}}
- --extra-memory={{eventer_memory_per_node}}Ki
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=eventer
- --poll-period=300000
- --estimator=exponential


@ -36,31 +36,34 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@ -74,13 +77,13 @@ spec:
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --sink=gcm:?metrics=autoscaling
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.4
name: heapster-nanny
resources:
limits:
@ -109,11 +112,14 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: k8s.gcr.io/addon-resizer:1.8.1
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.4
name: eventer-nanny
resources:
limits:
@ -142,7 +148,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=eventer
- --poll-period=300000
- --estimator=exponential


@ -36,31 +36,34 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@ -73,13 +76,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.4
name: heapster-nanny
resources:
limits:
@ -108,11 +111,14 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: k8s.gcr.io/addon-resizer:1.8.1
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.4
name: eventer-nanny
resources:
limits:
@ -141,7 +147,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=eventer
- --poll-period=300000
- --estimator=exponential


@ -23,31 +23,34 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@ -57,12 +60,13 @@ spec:
initialDelaySeconds: 180
timeoutSeconds: 5
command:
# On GCP, container.googleapis.com/instance_id node annotation is used to provide instance_id label for Stackdriver
- /heapster
- --source=kubernetes.summary_api:''
- --source=kubernetes.summary_api:?host_id_annotation=container.googleapis.com/instance_id
- --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }}
# BEGIN_PROMETHEUS_TO_SD
- name: prom-to-sd
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
@ -80,7 +84,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
# END_PROMETHEUS_TO_SD
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.4
name: heapster-nanny
resources:
limits:
@ -109,10 +113,13 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
volumes:
- name: heapster-config-volume
configMap:


@ -23,31 +23,34 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@ -59,7 +62,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.4
name: heapster-nanny
resources:
limits:
@ -88,10 +91,13 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
volumes:
- name: heapster-config-volume
configMap:


@ -1,6 +1,12 @@
approvers:
- floreks
- maciaszczykm
- bryk
reviewers:
- cheld
- cupofcat
- danielromlein
- floreks
- ianlewis
- konryd
- maciaszczykm
- mhenc
- rf232


@ -31,7 +31,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0
resources:
limits:
cpu: 100m


@ -29,7 +29,7 @@ metadata:
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
@ -80,9 +80,12 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- name: autoscaler
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.3.0
resources:
requests:
cpu: "20m"


@ -27,6 +27,12 @@ rules:
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@ -66,7 +72,9 @@ data:
prometheus :9153
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
@ -100,13 +108,11 @@ spec:
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: k8s.gcr.io/coredns:1.1.3
image: k8s.gcr.io/coredns:1.2.6
imagePullPolicy: IfNotPresent
resources:
limits:


@ -27,6 +27,12 @@ rules:
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@ -66,7 +72,9 @@ data:
prometheus :9153
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
@ -100,13 +108,11 @@ spec:
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: k8s.gcr.io/coredns:1.1.3
image: k8s.gcr.io/coredns:1.2.6
imagePullPolicy: IfNotPresent
resources:
limits:


@ -27,6 +27,12 @@ rules:
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@ -66,7 +72,9 @@ data:
prometheus :9153
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
@ -100,13 +108,11 @@ spec:
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: k8s.gcr.io/coredns:1.1.3
image: k8s.gcr.io/coredns:1.2.6
imagePullPolicy: IfNotPresent
resources:
limits:


@ -9,7 +9,7 @@ can use the DNS Services IP to resolve DNS names.
## Manually scale kube-dns Deployment
kube-dns creates only one DNS Pod by default. If
[dns-horizontal-autoscaler](../dns-horizontal-autoscaler/)
[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
is not enabled, you may need to manually scale kube-dns Deployment.
Please use below `kubectl scale` command to scale:
@ -18,9 +18,9 @@ kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WA
```
Do not use `kubectl edit` to modify kube-dns Deployment object if it is
controlled by [Addon Manager](../addon-manager/). Otherwise the modifications
controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
will be clobbered, in addition the replicas count for kube-dns Deployment will
be reset to 1. See [Cluster add-ons README](../README.md) and
be reset to 1. See [Cluster add-ons README](../../README.md) and
[#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.
## kube-dns addon templates


@ -86,6 +86,9 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -96,7 +99,7 @@ spec:
optional: true
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -147,7 +150,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -166,6 +169,7 @@ spec:
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@ -186,7 +190,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
livenessProbe:
httpGet:
path: /metrics


@ -86,6 +86,9 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -96,7 +99,7 @@ spec:
optional: true
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -147,7 +150,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -166,6 +169,7 @@ spec:
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@ -186,7 +190,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
livenessProbe:
httpGet:
path: /metrics


@ -86,6 +86,9 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -96,7 +99,7 @@ spec:
optional: true
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -147,7 +150,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -166,6 +169,7 @@ spec:
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/$DNS_DOMAIN/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@ -186,7 +190,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
livenessProbe:
httpGet:
path: /metrics


@ -0,0 +1,35 @@
# Nodelocal DNS Cache
This addon runs a node-local-dns pod on all cluster nodes. The pod runs CoreDNS as the DNS cache. It runs with `hostNetwork: true` and creates a dedicated dummy interface with a link-local IP (169.254.20.10/32 by default) to listen for DNS queries. The cache instances connect to clusterDNS in case of cache misses.
Design details are described [here](https://github.com/kubernetes/community/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
## nodelocaldns addon template
This directory contains the addon config yaml, `nodelocaldns.yaml`.
The variables will be substituted by the configure scripts when the yaml is copied onto the master.
### Network policy and DNS connectivity
When running the nodelocaldns addon on clusters that use network policy, additional rules might be required to enable DNS connectivity.
Using a namespace selector for DNS egress traffic as shown [here](https://docs.projectcalico.org/v2.6/getting-started/kubernetes/tutorials/advanced-policy)
might not be enough, since the node-local-dns pods run with `hostNetwork: true`.
One way to enable connectivity from node-local-dns pods to the clusterDNS IP is to use an ipBlock rule instead:
```
spec:
egress:
- ports:
- port: 53
protocol: TCP
- port: 53
protocol: UDP
to:
- ipBlock:
cidr: <well-known clusterIP for DNS>/32
podSelector: {}
policyTypes:
- Ingress
- Egress
```
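For completeness, a full NetworkPolicy object wrapping the spec above might look like the following sketch; the policy name, target namespace, and the 10.0.0.10 address are placeholders to replace with your cluster's values:
```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns-egress      # placeholder name
  namespace: default          # apply in each namespace covered by the policy
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
  egress:
  - ports:
    - port: 53
      protocol: TCP
    - port: 53
      protocol: UDP
    to:
    - ipBlock:
        cidr: 10.0.0.10/32    # placeholder: the well-known clusterIP for DNS
```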


@ -0,0 +1,144 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-local-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: node-local-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
__PILLAR__DNS__DOMAIN__:53 {
errors
cache 30
reload
loop
bind __PILLAR__LOCAL__DNS__
forward . __PILLAR__DNS__SERVER__ {
force_tcp
}
prometheus :9253
health __PILLAR__LOCAL__DNS__:8080
}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind __PILLAR__LOCAL__DNS__
forward . __PILLAR__DNS__SERVER__ {
force_tcp
}
prometheus :9253
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind __PILLAR__LOCAL__DNS__
forward . __PILLAR__DNS__SERVER__ {
force_tcp
}
prometheus :9253
}
.:53 {
errors
cache 30
reload
loop
bind __PILLAR__LOCAL__DNS__
forward . /etc/resolv.conf {
force_tcp
}
prometheus :9253
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-local-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: node-local-dns
template:
metadata:
labels:
k8s-app: node-local-dns
spec:
priorityClassName: system-node-critical
serviceAccountName: node-local-dns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: node-cache
image: k8s.gcr.io/k8s-dns-node-cache:1.15.0
resources:
limits:
memory: 30Mi
requests:
cpu: 25m
memory: 5Mi
args: [ "-localip", "__PILLAR__LOCAL__DNS__", "-conf", "/etc/coredns/Corefile" ]
securityContext:
privileged: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9253
name: metrics
protocol: TCP
livenessProbe:
httpGet:
host: __PILLAR__LOCAL__DNS__
path: /health
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 5
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
volumes:
- name: config-volume
configMap:
name: node-local-dns
items:
- key: Corefile
path: Corefile


@ -4,3 +4,5 @@ approvers:
reviewers:
- coffeepac
- piosz
labels:
- sig/instrumentation


@ -19,15 +19,16 @@ a Deployment, but allows for maintaining state on storage volumes.
### Security
Elasticsearch has capabilities to enable authorization using the
[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
in Elasticsearch and Kibana configurations. It can also be set via the
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
follow [official documentation][setupCreds] to set up credentials in
Elasticsearch and Kibana. Don't forget to propagate those credentials also to
Fluentd in its [configuration][fluentdCreds], using for example
[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
and [Secrets][secret] to store credentials in the Kubernetes apiserver.
Elasticsearch has capabilities to enable authorization using the [X-Pack
plugin][xPack]. For the sake of simplicity, this example uses the fully open
source prebuilt images from Elastic that do not contain the X-Pack plugin. If
you need these features, please consider building the images from either the
"basic" or "platinum" version. After enabling these features, follow [official
documentation][setupCreds] to set up credentials in Elasticsearch and Kibana.
Don't forget to propagate those credentials also to Fluentd in its
[configuration][fluentdCreds], using for example [environment
variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap] and
[Secrets][secret] to store credentials in the Kubernetes apiserver.
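As a rough sketch (the Secret name, keys, and environment variable names below are illustrative rather than fixed by this addon), the credentials can be kept in a Secret and surfaced to the Fluentd pods as environment variables:
```
apiVersion: v1
kind: Secret
metadata:
  name: elasticsearch-credentials   # illustrative name
  namespace: kube-system
stringData:
  username: elastic                 # replace with real credentials
  password: changeme
# The fluentd-es DaemonSet container would then reference the Secret, for example:
#   env:
#   - name: ES_USERNAME
#     valueFrom:
#       secretKeyRef:
#         name: elasticsearch-credentials
#         key: username
#   - name: ES_PASSWORD
#     valueFrom:
#       secretKeyRef:
#         name: elasticsearch-credentials
#         key: password
```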
### Initialization


@ -18,11 +18,11 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2
VOLUME ["/data"]
EXPOSE 9200 9300


@ -16,7 +16,7 @@
PREFIX = staging-k8s.gcr.io
IMAGE = elasticsearch
TAG = v5.6.4
TAG = v6.3.0
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .


@ -12,6 +12,3 @@ path.data: /data
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
xpack.security.enabled: false
xpack.monitoring.enabled: false


@ -20,14 +20,15 @@ import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog"
api "k8s.io/kubernetes/pkg/apis/core"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)
@ -60,22 +61,22 @@ func flattenSubsets(subsets []api.EndpointSubset) []string {
func main() {
flag.Parse()
glog.Info("Kubernetes Elasticsearch logging discovery")
klog.Info("Kubernetes Elasticsearch logging discovery")
cc, err := buildConfigFromEnvs(os.Getenv("APISERVER_HOST"), os.Getenv("KUBE_CONFIG_FILE"))
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
klog.Fatalf("Failed to make client: %v", err)
}
client, err := clientset.NewForConfig(cc)
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
klog.Fatalf("Failed to make client: %v", err)
}
namespace := metav1.NamespaceSystem
envNamespace := os.Getenv("NAMESPACE")
if envNamespace != "" {
if _, err := client.Core().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
klog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
}
namespace = envNamespace
}
@ -97,32 +98,31 @@ func main() {
// If we did not find an elasticsearch logging service then log a warning
// and return without adding any unicast hosts.
if elasticsearch == nil {
glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
klog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
return
}
var endpoints *api.Endpoints
addrs := []string{}
// Wait for some endpoints.
count := 0
count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES"))
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
endpoints, err = client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
continue
}
addrs = flattenSubsets(endpoints.Subsets)
glog.Infof("Found %s", addrs)
if len(addrs) > 0 && len(addrs) == count {
klog.Infof("Found %s", addrs)
if len(addrs) > 0 && len(addrs) >= count {
break
}
count = len(addrs)
}
// If there was an error finding endpoints then log a warning and quit.
if err != nil {
glog.Warningf("Error finding endpoints: %v", err)
klog.Warningf("Error finding endpoints: %v", err)
return
}
glog.Infof("Endpoints = %s", addrs)
klog.Infof("Endpoints = %s", addrs)
fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
}


@ -26,4 +26,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
chown -R elasticsearch:elasticsearch /data
./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
exec su elasticsearch -c ./bin/es-docker
exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh


@ -54,7 +54,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.3.0
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@ -63,17 +63,17 @@ spec:
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.3.0
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.3.0
kubernetes.io/cluster-service: "true"
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: k8s.gcr.io/elasticsearch:v5.6.4
- image: k8s.gcr.io/elasticsearch:v6.3.0
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class


@ -1,7 +1,7 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-es-config-v0.1.4
name: fluentd-es-config-v0.1.6
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
@ -115,7 +115,6 @@ data:
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
read_from_head true
<parse>
@ -273,21 +272,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@ -323,10 +307,11 @@ data:
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
@ -335,10 +320,11 @@ data:
<source>
@id journald-container-runtime
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
<storage>
@type local
persistent true
path /var/log/journald-container-runtime.pos
</storage>
read_from_head true
tag container-runtime
@ -347,10 +333,11 @@ data:
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
@ -359,22 +346,24 @@ data:
<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
path /var/log/journald-node-problem-detector.pos
</storage>
read_from_head true
tag node-problem-detector
</source>
<source>
@id kernel
@type systemd
filters [{ "_TRANSPORT": "kernel" }]
matches [{ "_TRANSPORT": "kernel" }]
<storage>
@type local
persistent true
path /var/log/kernel.pos
</storage>
<entry>
fields_strip_underscores true
@ -431,10 +420,19 @@ data:
@type kubernetes_metadata
</filter>
# Concatenate multi-line logs
<filter **>
@type concat
key message
multiline_end_regexp /\n$/
separator ""
</filter>
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
type_name fluentd
include_tag_key true
host elasticsearch-logging
port 9200


@ -48,24 +48,24 @@ roleRef:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.0.4
name: fluentd-es-v2.2.1
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.4
version: v2.2.1
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.0.4
version: v2.2.1
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v2.0.4
version: v2.2.1
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@ -77,7 +77,7 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
@ -107,4 +107,4 @@ spec:
path: /var/lib/docker/containers
- name: config-volume
configMap:
name: fluentd-es-config-v0.1.4
name: fluentd-es-config-v0.1.6


@ -55,4 +55,4 @@ EXPOSE 80
ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
# Start Fluentd to pick up our config that watches Docker container logs.
CMD /run.sh $FLUENTD_ARGS
CMD ["/run.sh"]


@ -1,11 +1,12 @@
source 'https://rubygems.org'
gem 'fluentd', '<=1.1.0'
gem 'activesupport', '~>5.1.4'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
gem 'fluent-plugin-elasticsearch', '~>2.4.1'
gem 'fluent-plugin-systemd', '~>0.3.1'
gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'fluentd', '<=1.2.4'
gem 'activesupport', '~>5.2.1'
gem 'fluent-plugin-concat', '~>2.3.0'
gem 'fluent-plugin-detect-exceptions', '~>0.0.11'
gem 'fluent-plugin-elasticsearch', '~>2.11.5'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.0.0'
gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
gem 'oj', '~>3.3.1.0'
gem 'fluent-plugin-prometheus', '~>1.0.1'
gem 'fluent-plugin-systemd', '~>1.0.1'
gem 'oj', '~>3.6.5'


@ -16,7 +16,7 @@
PREFIX = staging-k8s.gcr.io
IMAGE = fluentd-elasticsearch
TAG = v2.0.4
TAG = v2.3.1
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .


@ -20,4 +20,4 @@
# For systems without journald
mkdir -p /var/log/journal
exec /usr/local/bin/fluentd $@
exec /usr/local/bin/fluentd $FLUENTD_ARGS


@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: docker.elastic.co/kibana/kibana:5.6.4
image: docker.elastic.co/kibana/kibana-oss:6.3.2
resources:
# need more cpu upon initialization, therefore burstable class
limits:
@ -33,10 +33,6 @@ spec:
value: http://elasticsearch-logging:9200
- name: SERVER_BASEPATH
value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui


@ -4,3 +4,5 @@ approvers:
reviewers:
- piosz
- x13n
labels:
- sig/gcp


@ -29,11 +29,11 @@ subjects:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: event-exporter-v0.2.1
name: event-exporter-v0.2.3
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.2.1
version: v0.2.3
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@ -42,18 +42,18 @@ spec:
metadata:
labels:
k8s-app: event-exporter
version: v0.2.1
version: v0.2.3
spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: k8s.gcr.io/event-exporter:v0.2.1
image: k8s.gcr.io/event-exporter:v0.2.3
command:
- /event-exporter
- -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }}
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons


@ -61,16 +61,18 @@ data:
# reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
tag reform.*
read_from_head true
format multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>
<filter reform.**>
@ -210,20 +212,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@ -265,7 +253,7 @@ data:
<source>
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
pos_file /var/log/gcp-journald-container-runtime.pos
read_from_head true
tag container-runtime
@ -301,7 +289,7 @@ data:
@type grep
<exclude>
key _SYSTEMD_UNIT
pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$
</exclude>
</filter>
# END_NODE_JOURNAL


@ -64,16 +64,18 @@ data:
# reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
tag reform.*
read_from_head true
format multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>
<filter reform.**>
@ -98,6 +100,8 @@ data:
# instead of jsonPayload after extracting 'time', 'severity' and
# 'stream' from the record.
message ${record['log']}
# If 'severity' is not set, assume stderr is ERROR and stdout is INFO.
severity ${record['severity'] || if record['stream'] == 'stderr' then 'ERROR' else 'INFO' end}
</record>
tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
remove_keys stream,log
@ -108,8 +112,8 @@ data:
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
message message
stream "logging.googleapis.com/local_resource_id"
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
@ -223,20 +227,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@ -278,7 +268,7 @@ data:
<source>
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
pos_file /var/log/gcp-journald-container-runtime.pos
read_from_head true
tag container-runtime
@ -314,7 +304,7 @@ data:
@type grep
<exclude>
key _SYSTEMD_UNIT
pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$
</exclude>
</filter>
# END_NODE_JOURNAL
@ -386,6 +376,12 @@ data:
@type null
</match>
# Add a unique insertId to each log entry that doesn't already have it.
# This helps guarantee the order and prevent log duplication.
<filter **>
@type add_insert_ids
</filter>
# This section is exclusive for k8s_container logs. These logs come with
# 'stderr'/'stdout' tags.
# We use a separate output stanza for 'k8s_node' logs with a smaller buffer
@ -408,9 +404,9 @@ data:
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the recommended
# chunk size of 5MB per write request.
buffer_chunk_limit 1M
buffer_chunk_limit 512k
# Cap the combined memory usage of this buffer and the one below to
# 1MiB/chunk * (6 + 2) chunks = 8 MiB
# 512KiB/chunk * (6 + 2) chunks = 4 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
@ -421,8 +417,9 @@ data:
# Use multiple threads for processing.
num_threads 2
use_grpc true
# Use Metadata Agent to get monitored resource.
enable_metadata_agent true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
</match>
# Attach local_resource_id for 'k8s_node' monitored resource.
@ -450,15 +447,16 @@ data:
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 1M
buffer_chunk_limit 512k
buffer_queue_limit 2
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 2
use_grpc true
# Use Metadata Agent to get monitored resource.
enable_metadata_agent true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
</match>
metadata:
name: fluentd-gcp-config-v1.2.5


@ -1,13 +1,13 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v3.0.0
name: fluentd-gcp-{{ fluentd_gcp_yaml_version }}
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v3.0.0
version: {{ fluentd_gcp_yaml_version }}
spec:
updateStrategy:
type: RollingUpdate
@ -16,7 +16,7 @@ spec:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v3.0.0
version: {{ fluentd_gcp_yaml_version }}
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@ -26,6 +26,7 @@ spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-gcp
dnsPolicy: Default
hostNetwork: true
containers:
- name: fluentd-gcp
image: gcr.io/stackdriver-agents/stackdriver-logging-agent:{{ fluentd_gcp_version }}
@ -79,7 +80,7 @@ spec:
fi;
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
@ -99,14 +100,12 @@ spec:
# END_PROMETHEUS_TO_SD
nodeSelector:
beta.kubernetes.io/fluentd-ds-ready: "true"
terminationGracePeriodSeconds: 60
tolerations:
- key: "node.alpha.kubernetes.io/ismaster"
effect: "NoSchedule"
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:


@ -19,6 +19,7 @@ spec:
volumes:
- 'hostPath'
- 'secret'
- 'projected'
# TODO: This only needs a hostPath to read /etc/ssl/certs,
# but it should be able to just include these in the image.
allowedHostPaths:


@ -20,6 +20,7 @@ spec:
- 'configMap'
- 'hostPath'
- 'secret'
- 'projected'
allowedHostPaths:
- pathPrefix: /var/log
- pathPrefix: /var/lib/docker/containers


@ -5,7 +5,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: fluentd-gcp-scaler
version: v0.3.0
version: v0.5.0
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
@ -19,10 +19,10 @@ spec:
serviceAccountName: fluentd-gcp-scaler
containers:
- name: fluentd-gcp-scaler
image: k8s.gcr.io/fluentd-gcp-scaler:0.3
image: k8s.gcr.io/fluentd-gcp-scaler:0.5
command:
- /scaler.sh
- --ds-name=fluentd-gcp-v3.0.0
- --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }}
- --scaling-policy=fluentd-gcp-scaling-policy
env:
# Defaults, used if no overrides are found in fluentd-gcp-scaling-policy


@ -29,7 +29,9 @@ spec:
hostNetwork: true
containers:
- name: ip-masq-agent
image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2
image: k8s.gcr.io/ip-masq-agent-amd64:v2.1.1
args:
- --masq-chain=IP-MASQ
resources:
requests:
cpu: 10m
@ -52,5 +54,9 @@ spec:
- key: config
path: ip-masq-agent
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: "CriticalAddonsOnly"
operator: "Exists"

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,12 +1,10 @@
approvers:
- bowei
- dnardo
- freehan
- nicksardo
- mrhohn
- jingax10
reviewers:
- bowei
- dnardo
- freehan
- nicksardo
- mrhohn
- jingax10


@ -1,6 +1,8 @@
approvers:
- kawych
- piosz
- x13n
reviewers:
- kawych
- piosz
- x13n


@ -7,9 +7,7 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
- "apps"
- "extensions"
- "*"
resources:
- "*"
verbs:


@ -7,22 +7,6 @@ metadata:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metadata-agent-config
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
node_level.conf: |-
KubernetesUseWatch: true
KubernetesClusterLevelMetadata: false
cluster_level.conf: |-
KubernetesUseWatch: true
KubernetesClusterLevelMetadata: true
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
@ -44,28 +28,24 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
serviceAccountName: metadata-agent
priorityClassName: system-node-critical
containers:
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
imagePullPolicy: IfNotPresent
name: metadata-agent
livenessProbe:
exec:
command:
- /bin/bash
- -c
- |
if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
exit 1;
fi
periodSeconds: 10
httpGet:
path: /healthz
port: 8000
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
volumeMounts:
- name: metadata-agent-config-volume
mountPath: /etc/config
command:
- /opt/stackdriver/metadata/sbin/metadatad
- --config-file=/etc/config/node_level.conf
args:
- -o KubernetesUseWatch=true
- -o KubernetesClusterLevelMetadata=false
- -o MetadataReporterPurgeDeleted=true
ports:
- containerPort: 8000
hostPort: 8799
@ -78,10 +58,11 @@ spec:
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: metadata-agent-config-volume
configMap:
name: metadata-agent-config
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
updateStrategy:
rollingUpdate:
maxUnavailable: 1
@ -109,28 +90,24 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
serviceAccountName: metadata-agent
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
imagePullPolicy: IfNotPresent
name: metadata-agent
livenessProbe:
exec:
command:
- /bin/bash
- -c
- |
if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
exit 1;
fi
periodSeconds: 10
httpGet:
path: /healthz
port: 8000
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
volumeMounts:
- name: metadata-agent-config-volume
mountPath: /etc/config
command:
- /opt/stackdriver/metadata/sbin/metadatad
- --config-file=/etc/config/cluster_level.conf
args:
- -o KubernetesUseWatch=true
- -o KubernetesClusterLevelMetadata=true
- -o MetadataReporterPurgeDeleted=true
ports:
- containerPort: 8000
protocol: TCP
@ -142,10 +119,6 @@ spec:
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: metadata-agent-config-volume
configMap:
name: metadata-agent-config
strategy:
rollingUpdate:
maxUnavailable: 1


@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gce:podsecuritypolicy:metadata-agent
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
name: metadata-agent
namespace: kube-system


@ -44,7 +44,7 @@ spec:
effect: "NoSchedule"
containers:
- name: metadata-proxy
image: k8s.gcr.io/metadata-proxy:v0.1.9
image: k8s.gcr.io/metadata-proxy:v0.1.10
securityContext:
privileged: true
# Request and limit resources to get guaranteed QoS.
@ -57,7 +57,7 @@ spec:
cpu: "30m"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
# Request and limit resources to get guaranteed QoS.
resources:
requests:


@ -23,24 +23,24 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server-v0.2.1
name: metrics-server-v0.3.1
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.2.1
version: v0.3.1
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.2.1
version: v0.3.1
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.2.1
version: v0.3.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
@ -49,16 +49,20 @@ spec:
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.2.1
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
command:
- /metrics-server
- --source=kubernetes.summary_api:''
- --metric-resolution=30s
# These are needed for GKE, which doesn't support secure communication yet.
# Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
- --kubelet-port=10255
- --deprecated-kubelet-completely-insecure=true
ports:
- containerPort: 443
name: https
protocol: TCP
- name: metrics-server-nanny
image: k8s.gcr.io/addon-resizer:1.8.1
image: k8s.gcr.io/addon-resizer:1.8.4
resources:
limits:
cpu: 100m
@ -81,15 +85,18 @@ spec:
command:
- /pod_nanny
- --config-dir=/etc/config
- --cpu=40m
- --cpu={{ base_metrics_server_cpu }}
- --extra-cpu=0.5m
- --memory=40Mi
- --extra-memory=4Mi
- --memory={{ base_metrics_server_memory }}
- --extra-memory={{ metrics_server_memory_per_node }}Mi
- --threshold=5
- --deployment=metrics-server-v0.2.1
- --deployment=metrics-server-v0.3.1
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ metrics_server_min_cluster_size }}
volumes:
- name: metrics-server-config-volume
configMap:

View File

@ -39,7 +39,7 @@ spec:
initialDelaySeconds: 5
timeoutSeconds: 5
- name: addon-resizer
image: k8s.gcr.io/addon-resizer:1.7
image: k8s.gcr.io/addon-resizer:1.8.4
resources:
limits:
cpu: 100m
@ -56,8 +56,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: config-volume
mountPath: /etc/config
command:
- /pod_nanny
- --config-dir=/etc/config
- --container=kube-state-metrics
- --cpu=100m
- --extra-cpu=1m
@ -65,3 +69,23 @@ spec:
- --extra-memory=2Mi
- --threshold=5
- --deployment=kube-state-metrics
volumes:
- name: config-volume
configMap:
name: kube-state-metrics-config
---
# Config map for resource configuration.
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-state-metrics-config
namespace: kube-system
labels:
k8s-app: kube-state-metrics
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
NannyConfiguration: |-
apiVersion: nannyconfig/v1alpha1
kind: NannyConfiguration

View File

@ -1,17 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:2.7-slim
RUN pip install pyyaml

View File

@ -1,6 +0,0 @@
# Python image
The python image here is used by OS distros that don't have python installed to
run python scripts to parse the yaml files in the addon updater script.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/python-image/README.md?pixel)]()

View File

@ -33,6 +33,9 @@ rules:
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "watch"]

View File

@ -0,0 +1,5 @@
approvers:
- tallclair
- dchen1107
reviewers:
- sig-node-reviewers

View File

@ -0,0 +1,12 @@
# RuntimeClass
RuntimeClass is an alpha feature for supporting multiple container runtimes within a cluster. When
enabled, pods can select a RuntimeClass to run with using the `PodSpec.RuntimeClassName` field.
To enable RuntimeClass, set the feature gate `RuntimeClass=true`, and ensure the CRD defined in this
directory is installed.
For more information, see:
https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/runtimeclass/README.md?pixel)]()
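As a rough illustration of how the pieces fit together (nothing below ships with this addon; the `sandboxed` and `example-handler` names are hypothetical and the handler must already be configured in the node's CRI runtime), a RuntimeClass and a pod selecting it could be created like this once the feature gate is on and the CRD in this directory is installed:
# Hypothetical sketch: create a RuntimeClass, then a pod that selects it.
kubectl apply -f - <<EOF
apiVersion: node.k8s.io/v1alpha1
kind: RuntimeClass
metadata:
  name: sandboxed
spec:
  runtimeHandler: example-handler  # must match a handler known to the CRI runtime
EOF
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: runtimeclass-demo
spec:
  runtimeClassName: sandboxed      # the PodSpec.RuntimeClassName field mentioned above
  containers:
  - name: pause
    image: k8s.gcr.io/pause:3.1
EOF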

View File

@ -0,0 +1,26 @@
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
metadata:
name: runtimeclasses.node.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: node.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
names:
plural: runtimeclasses
singular: runtimeclass
kind: RuntimeClass
scope: Cluster
validation:
openAPIV3Schema:
properties:
spec:
properties:
runtimeHandler:
type: string
pattern: '^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?$'

View File

@ -3,7 +3,7 @@ kind: StorageClass
metadata:
name: gp2
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists

View File

@ -3,7 +3,7 @@ kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists

View File

@ -3,7 +3,7 @@ kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists

View File

@ -4,7 +4,7 @@ metadata:
namespace: kube-system
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
labels:
addonmanager.kubernetes.io/mode: Reconcile
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/host-path

View File

@ -3,7 +3,7 @@ kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists

View File

@ -3,7 +3,7 @@ kind: StorageClass
metadata:
name: thin
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/vsphere-volume

View File

@ -0,0 +1,6 @@
approvers:
- saad-ali
- jsafrane
- msau42
reviewers:
- davidz627

View File

@ -0,0 +1,13 @@
# Kubernetes CSI CRDs
The Kubernetes Container Storage Interface implementation defines some API objects as CRDs that Kubernetes components
including the Attach/Detach controller depend on.
If you are using CSI, it is recommended that you enable the relevant feature gates (e.g. `CSIDriverRegistry`, `CSINodeInfo`, etc.), and ensure the CRDs in this directory are installed.
These objects and their CRDs are defined in `staging/src/k8s.io/csi-api/pkg/crd/manifests`, the source of truth.
They are copied from that CRD manifest directory to this addon directory.
A unit test in `staging/src/k8s.io/csi-api/pkg/crd` verifies that this copy (and any other copies) of the manifests outside of `staging/src/k8s.io/csi-api/pkg/crd/manifests` does not drift from that source of truth.
If you need to make changes, please make them in the `staging/src/k8s.io/csi-api/pkg/crd/manifests` directory and then update this copy.
For more information, see: https://kubernetes-csi.github.io/docs/
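For illustration only (this object is not part of the addon and the driver name is made up), once the `CSIDriverRegistry` feature gate is enabled and the CSIDriver CRD in this directory is installed, a driver could be registered with an object such as:
# Hypothetical sketch: register a CSIDriver object against the installed CRD.
kubectl apply -f - <<EOF
apiVersion: csi.storage.k8s.io/v1alpha1
kind: CSIDriver
metadata:
  name: com.example.csi-driver   # illustrative driver name
spec:
  attachRequired: true           # Kubernetes should call attach before mount
EOF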

View File

@ -0,0 +1,28 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: csidrivers.csi.storage.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: csi.storage.k8s.io
names:
kind: CSIDriver
plural: csidrivers
scope: Cluster
validation:
openAPIV3Schema:
properties:
spec:
description: Specification of the CSI Driver.
properties:
attachRequired:
description: Indicates this CSI volume driver requires an attach operation,
and that Kubernetes should call attach and wait for any attach operation
to complete before proceeding to mount.
type: boolean
podInfoOnMountVersion:
description: Indicates this CSI volume driver requires additional pod
information (like podName, podUID, etc.) during mount operations.
type: string
version: v1alpha1

View File

@ -0,0 +1,54 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: csinodeinfos.csi.storage.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: csi.storage.k8s.io
names:
kind: CSINodeInfo
plural: csinodeinfos
scope: Cluster
validation:
openAPIV3Schema:
properties:
spec:
description: Specification of CSINodeInfo
properties:
drivers:
description: List of CSI drivers running on the node and their specs.
type: array
items:
properties:
name:
description: The CSI driver that this object refers to.
type: string
nodeID:
description: The node from the driver point of view.
type: string
topologyKeys:
description: List of keys supported by the driver.
items:
type: string
type: array
status:
description: Status of CSINodeInfo
properties:
drivers:
description: List of CSI drivers running on the node and their statuses.
type: array
items:
properties:
name:
description: The CSI driver that this object refers to.
type: string
available:
description: Whether the CSI driver is installed.
type: boolean
volumePluginMechanism:
description: Indicates to external components the required mechanism
to use for any in-tree plugins replaced by this driver.
pattern: in-tree|csi
type: string
version: v1alpha1

View File

@ -124,7 +124,7 @@ export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}
# modification is overwritten.
# If we included ResourceQuota, we should keep it at the end of the list to
# prevent incrementing quota usage prematurely.
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
# Extra options to set on the Docker command line.
# This is useful for setting --insecure-registry for local registries.

View File

@ -66,6 +66,8 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
# LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
# NamespaceLifecycle, NamespaceAutoProvision, AlwaysAdmit,
# ServiceAccount, DefaultStorageClass, DefaultTolerationSeconds, ResourceQuota
# Marked deprecated. Use --enable-admission-plugins or --disable-admission-plugins instead since v1.10.
# It will be removed in a future version.
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
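# Illustrative only (not part of this file): with the deprecated flag removed,
# the same plugin list would be passed via the newer apiserver flags, e.g.:
#   KUBE_ADMISSION_CONTROL="--enable-admission-plugins=${ADMISSION_CONTROL}"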
# --client-ca-file="": If set, any request presenting a client certificate signed

View File

@ -30,7 +30,8 @@ KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt"
# RSA key used to sign service account tokens.
KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key"
# --leader-elect
# --leader-elect: Start a leader election client and gain leadership before
# executing the main loop. Enable this when running replicated components for high availability.
KUBE_LEADER_ELECT="--leader-elect"
EOF

View File

@ -27,9 +27,11 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --master: The address of the Kubernetes API server (overrides any value in kubeconfig).
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
# --leader-elect
# --leader-elect: Start a leader election client and gain leadership before
# executing the main loop. Enable this when running replicated components for high availability.
KUBE_LEADER_ELECT="--leader-elect"
# Add your own!

View File

@ -87,6 +87,7 @@ EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
Restart=on-failure
KillMode=process
RestartSec=15s
[Install]
WantedBy=multi-user.target

View File

@ -47,6 +47,9 @@ case "$(uname -m)" in
arm*)
host_arch=arm
;;
aarch64*)
host_arch=arm64
;;
i?86*)
host_arch=386
;;

View File

@ -3,12 +3,12 @@ kind: RoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cloud-provider
name: gce:cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
@ -19,11 +19,11 @@ kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-provider
name: gce:cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider

View File

@ -3,7 +3,7 @@ kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
namespace: kube-system
rules:
- apiGroups:
@ -23,7 +23,51 @@ kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
annotations:
kubernetes.io/deprecation: 'cloud-provider role is DEPRECATED out of concern
for potential collisions and will be removed in 1.16. Do not use
this role.'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- patch
- update
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
annotations:
kubernetes.io/deprecation: 'cloud-provider clusterrole is DEPRECATED out of concern
for potential collisions and will be removed in 1.16. Do not use
this role.'
rules:
- apiGroups:
- ""

View File

@ -0,0 +1,4 @@
# GCE Node Termination Handler
This addon deploys [GCE Node Termination Handler](https://github.com/GoogleCloudPlatform/k8s-node-termination-handler) onto Kubernetes clusters on GCP.
It is meant to help translate GCE VM termination notifications into graceful pod terminations in Kubernetes.
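On clusters brought up with the scripts in this repository, the addon is gated by the `ENABLE_NODE_TERMINATION_HANDLER` variable introduced in the GCE `config-default.sh` in this change; a minimal sketch of turning it on (the kube-up invocation is only an example):
# Sketch: enable the addon before bringing up a GCE cluster with these scripts.
export ENABLE_NODE_TERMINATION_HANDLER=true
#   ./cluster/kube-up.sh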

View File

@ -0,0 +1,76 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: node-termination-handler
namespace: kube-system
name: node-termination-handler
spec:
selector:
matchLabels:
k8s-app: node-termination-handler
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: node-termination-handler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
# Necessary to reboot node
hostPID: true
affinity:
nodeAffinity:
# Restrict to GPU nodes or preemptible nodes
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cloud.google.com/gke-accelerator
operator: Exists
- matchExpressions:
- key: cloud.google.com/gke-preemptible
operator: Exists
volumes:
- name: klet-service-account
hostPath:
path: /var/lib/kubelet
- name: klet-ca-crt
hostPath:
path: /etc/srv/kubernetes
tolerations:
# Run regardless of any existing taints.
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
containers:
- image: k8s.gcr.io/gke-node-termination-handler@sha256:e08ca863a547754fa7b75064bdad04f04cbef86c7b0a181ecc7304e747623181
name: node-termination-handler
command: ["./node-termination-handler"]
args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--annotation=cloud.google.com/impending-node-termination"]
securityContext:
capabilities:
# Necessary to reboot node
add: ["SYS_BOOT"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 50m
memory: 30Mi
volumeMounts:
- name: klet-service-account
mountPath: /var/lib/kubelet
- name: klet-ca-crt
mountPath: /etc/srv/kubernetes

View File

@ -15,6 +15,7 @@ spec:
volumes:
- 'nfs'
- 'secret' # Required for service account credentials.
- 'projected'
hostNetwork: false
hostIPC: false
hostPID: false

View File

@ -19,10 +19,27 @@ metadata:
spec:
privileged: false
allowPrivilegeEscalation: false
# The docker default set of capabilities
allowedCapabilities:
- SETPCAP
- MKNOD
- AUDIT_WRITE
- CHOWN
- NET_RAW
- DAC_OVERRIDE
- FOWNER
- FSETID
- KILL
- SETGID
- SETUID
- NET_BIND_SERVICE
- SYS_CHROOT
- SETFCAP
volumes:
- 'emptyDir'
- 'configMap'
- 'secret'
- 'projected'
hostNetwork: false
hostIPC: false
hostPID: false

View File

@ -37,6 +37,14 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
@ -51,6 +59,7 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@ -73,7 +82,6 @@ fi
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
@ -106,7 +114,6 @@ CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
@ -164,16 +171,15 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
@ -183,18 +189,24 @@ fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
fi
ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
@ -207,8 +219,8 @@ fi
ENCRYPTION_PROVIDER_CONFIG="${ENCRYPTION_PROVIDER_CONFIG:-}"
if [[ -z "${ENCRYPTION_PROVIDER_CONFIG}" ]]; then
ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n'
kind: EncryptionConfig
apiVersion: v1
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
- resources:
- secrets
@ -216,7 +228,7 @@ resources:
- aesgcm:
keys:
- name: key1
secret: $(dd if=/dev/random bs=32 count=1 status=none | base64 | tr -d '\r\n')
secret: $(dd if=/dev/urandom iflag=fullblock bs=32 count=1 2>/dev/null | base64 | tr -d '\r\n')
EOM
)
fi
@ -247,13 +259,13 @@ FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
# Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
@ -286,9 +298,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
@ -339,7 +348,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
@ -391,10 +400,6 @@ METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
@ -403,7 +408,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
# YAML exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.2.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@ -414,11 +421,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
@ -448,8 +458,12 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
# Optional: Enable Node termination Handler for Preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override default Node Termination Handler Image
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi

View File

@ -37,6 +37,14 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
@ -45,11 +53,16 @@ NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
# Increase initial delay for the apiserver liveness probe, to avoid prematurely tearing it down
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC=${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-45}
# Also increase the initial delay for etcd just to be safe
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC=${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-45}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@ -75,7 +88,6 @@ ALLOWED_NOTREADY_NODES="${ALLOWED_NOTREADY_NODES:-$((NUM_NODES / 100))}"
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
@ -101,7 +113,6 @@ CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
@ -166,7 +177,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.18-0) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.24-1) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@ -202,26 +213,29 @@ CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_R
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
fi
ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
@ -231,6 +245,7 @@ fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-true}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
@ -254,15 +269,16 @@ fi
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
# Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
@ -293,9 +309,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
@ -346,7 +359,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
@ -383,10 +396,6 @@ HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth,
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, a image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
@ -405,10 +414,6 @@ ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
ADVANCED_AUDIT_LOG_MODE="${ADVANCED_AUDIT_LOG_MODE:-batch}" # batch, blocking
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
@ -419,7 +424,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
# YAML exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.2.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@ -430,11 +437,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
@ -467,8 +477,12 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
# Optional: Enable Node termination Handler for Preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override default Node Termination Handler Image
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi

View File

@ -14,8 +14,8 @@ go_test(
],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS generated vendored Normal file
View File

@ -0,0 +1,4 @@
approvers:
- dchen1107
- filbranden
- yguo0905

View File

@ -17,11 +17,13 @@ limitations under the License.
package gci
import (
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
@ -51,89 +53,97 @@ readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
readonly KUBE_API_SERVER_DOCKER_TAG=v1.11.0-alpha.0.1808_3c7452dc11645d-dirty
readonly LOG_OWNER_USER=$(whoami)
readonly LOG_OWNER_USER=$(id -un)
readonly LOG_OWNER_GROUP=$(id -gn)
readonly SERVICEACCOUNT_ISSUER=https://foo.bar.baz
readonly SERVICEACCOUNT_KEY_PATH=/foo/bar/baz.key
{{if .EncryptionProviderConfig}}
ENCRYPTION_PROVIDER_CONFIG={{.EncryptionProviderConfig}}
{{end}}
ENCRYPTION_PROVIDER_CONFIG_PATH={{.EncryptionProviderConfigPath}}
readonly ETCD_KMS_KEY_ID={{.ETCDKMSKeyID}}
{{if .CloudKMSIntegration}}
readonly CLOUD_KMS_INTEGRATION=true
{{end}}
`
kubeAPIServerManifestFileName = "kube-apiserver.manifest"
kmsPluginManifestFileName = "kms-plugin-container.manifest"
kubeAPIServerStartFuncName = "start-kube-apiserver"
// Position of containers within a pod manifest
kmsPluginContainerIndex = 0
apiServerContainerIndexNoKMS = 0
apiServerContainerIndexWithKMS = 1
// command": [
// "/bin/sh", - Index 0
// "-c", - Index 1
// "exec /usr/local/bin/kube-apiserver " - Index 2
execArgsIndex = 2
socketVolumeMountIndexKMSPlugin = 1
socketVolumeMountIndexAPIServer = 0
)
type kubeAPIServerEnv struct {
KubeHome string
EncryptionProviderConfig string
EncryptionProviderConfigPath string
ETCDKMSKeyID string
EncryptionProviderConfig string
CloudKMSIntegration bool
}
type kubeAPIServerManifestTestCase struct {
*ManifestTestCase
apiServerContainer v1.Container
kmsPluginContainer v1.Container
}
func newKubeAPIServerManifestTestCase(t *testing.T) *kubeAPIServerManifestTestCase {
return &kubeAPIServerManifestTestCase{
ManifestTestCase: newManifestTestCase(t, kubeAPIServerManifestFileName, kubeAPIServerStartFuncName, []string{kmsPluginManifestFileName}),
ManifestTestCase: newManifestTestCase(t, kubeAPIServerManifestFileName, kubeAPIServerStartFuncName, nil),
}
}
func (c *kubeAPIServerManifestTestCase) mustLoadContainers() {
func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv, kubeEnv string) {
c.mustInvokeFunc(kubeEnv, e)
c.mustLoadPodFromManifest()
switch len(c.pod.Spec.Containers) {
case 1:
c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexNoKMS]
case 2:
c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexWithKMS]
c.kmsPluginContainer = c.pod.Spec.Containers[kmsPluginContainerIndex]
default:
c.t.Fatalf("got %d containers in apiserver pod, want 1 or 2", len(c.pod.Spec.Containers))
}
}
func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv) {
c.mustInvokeFunc(deployHelperEnv, e)
c.mustLoadContainers()
}
func getEncryptionProviderConfigFlag(path string) string {
return fmt.Sprintf("--experimental-encryption-provider-config=%s", path)
}
func TestEncryptionProviderFlag(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
var (
// command": [
// "/bin/sh", - Index 0
// "-c", - Index 1
// "exec /usr/local/bin/kube-apiserver " - Index 2
execArgsIndex = 2
encryptionConfigFlag = "--encryption-provider-config"
)
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("FOO")),
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
testCases := []struct {
desc string
encryptionProviderConfig string
wantFlag bool
}{
{
desc: "ENCRYPTION_PROVIDER_CONFIG is set",
encryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("foo")),
wantFlag: true,
},
{
desc: "ENCRYPTION_PROVIDER_CONFIG is not set",
encryptionProviderConfig: "",
wantFlag: false,
},
}
c.invokeTest(e)
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
execArgs := c.apiServerContainer.Command[execArgsIndex]
if !strings.Contains(execArgs, expectedFlag) {
c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
EncryptionProviderConfig: tc.encryptionProviderConfig,
}
c.invokeTest(e, deployHelperEnv)
execArgs := c.pod.Spec.Containers[0].Command[execArgsIndex]
flagIsInArg := strings.Contains(execArgs, encryptionConfigFlag)
flag := fmt.Sprintf("%s=%s", encryptionConfigFlag, e.EncryptionProviderConfigPath)
switch {
case tc.wantFlag && !flagIsInArg:
t.Fatalf("Got %q,\n want flags to contain %q", execArgs, flag)
case !tc.wantFlag && flagIsInArg:
t.Fatalf("Got %q,\n do not want flags to contain %q", execArgs, encryptionConfigFlag)
case tc.wantFlag && flagIsInArg && !strings.Contains(execArgs, flag):
t.Fatalf("Got flags: %q, want it to contain %q", execArgs, flag)
}
})
}
}
@ -144,8 +154,8 @@ func TestEncryptionProviderConfig(t *testing.T) {
p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("FOO")),
EncryptionProviderConfigPath: p,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("foo")),
}
c.mustInvokeFunc(deployHelperEnv, e)
@ -153,60 +163,91 @@ func TestEncryptionProviderConfig(t *testing.T) {
if _, err := os.Stat(p); err != nil {
c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
}
}
// TestKMSEncryptionProviderConfig asserts that if ETCD_KMS_KEY_ID is set then start-kube-apiserver will produce
// EncryptionProviderConfig file of type KMS and inject experimental-encryption-provider-config startup flag.
func TestKMSEncryptionProviderConfig(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
ETCDKMSKeyID: "FOO",
}
c.invokeTest(e)
expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
execArgs := c.apiServerContainer.Command[execArgsIndex]
if !strings.Contains(execArgs, expectedFlag) {
c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
}
p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
if _, err := os.Stat(p); err != nil {
c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
}
d, err := ioutil.ReadFile(p)
got, err := ioutil.ReadFile(p)
if err != nil {
c.t.Fatalf("Failed to read encryption provider config %s", p)
}
if !strings.Contains(string(d), "name: grpc-kms-provider") {
c.t.Fatalf("Got %s\n, wanted encryption provider config to be of type grpc-kms", string(d))
want := []byte("foo")
if !bytes.Equal(got, want) {
c.t.Fatalf("got encryptionConfig:\n%q\n, want encryptionConfig:\n%q", got, want)
}
}
func TestKMSPluginAndAPIServerSharedVolume(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
var e = kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
ETCDKMSKeyID: "FOO",
func TestKMSIntegration(t *testing.T) {
var (
socketPath = "/var/run/kmsplugin"
dirOrCreate = v1.HostPathType(v1.HostPathDirectoryOrCreate)
socketName = "kmssocket"
)
testCases := []struct {
desc string
cloudKMSIntegration bool
wantVolume v1.Volume
wantVolMount v1.VolumeMount
}{
{
desc: "CLOUD_KMS_INTEGRATION is set",
cloudKMSIntegration: true,
wantVolume: v1.Volume{
Name: socketName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: socketPath,
Type: &dirOrCreate,
},
},
},
wantVolMount: v1.VolumeMount{
Name: socketName,
MountPath: socketPath,
},
},
{
desc: "CLOUD_KMS_INTEGRATION is not set",
cloudKMSIntegration: false,
},
}
c.invokeTest(e)
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
k := c.kmsPluginContainer.VolumeMounts[socketVolumeMountIndexKMSPlugin].MountPath
a := c.apiServerContainer.VolumeMounts[socketVolumeMountIndexAPIServer].MountPath
var e = kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("foo")),
CloudKMSIntegration: tc.cloudKMSIntegration,
}
if k != a {
t.Fatalf("Got %s!=%s, wanted KMSPlugin VolumeMount #1:%s to be equal to kube-apiserver VolumeMount #0:%s",
k, a, k, a)
c.invokeTest(e, deployHelperEnv)
// By this point, we can be sure that kube-apiserver manifest is a valid POD.
var gotVolume v1.Volume
for _, v := range c.pod.Spec.Volumes {
if v.Name == socketName {
gotVolume = v
break
}
}
if !reflect.DeepEqual(gotVolume, tc.wantVolume) {
t.Errorf("got volume %v, want %v", gotVolume, tc.wantVolume)
}
var gotVolumeMount v1.VolumeMount
for _, v := range c.pod.Spec.Containers[0].VolumeMounts {
if v.Name == socketName {
gotVolumeMount = v
break
}
}
if !reflect.DeepEqual(gotVolumeMount, tc.wantVolMount) {
t.Errorf("got volumeMount %v, want %v", gotVolumeMount, tc.wantVolMount)
}
})
}
}

View File

@ -25,17 +25,6 @@ set -o errexit
set -o nounset
set -o pipefail
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function setup-os-params {
# Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
# /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
@ -43,6 +32,40 @@ function setup-os-params {
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# #1: number of secure bytes to generate. We round up to the nearest factor of 32.
function secure_random {
local infobytes="${1}"
if ((infobytes <= 0)); then
echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
return 1
fi
local out=""
for (( i = 0; i < "${infobytes}"; i += 32 )); do
# uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
# three uuids and take their sum. The sum is encoded in ASCII hex, hence the
# 64 character cut.
out+="$(
(
uuidgen --random;
uuidgen --random;
uuidgen --random;
) | sha256sum \
| head -c 64
)";
done
# Finally, convert the ASCII hex to base64 to increase the density.
echo -n "${out}" | xxd -r -p | base64 -w 0
}
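# Illustrative usage only (secure_random is not invoked here): request at least
# 32 bytes of entropy, returned base64-encoded, e.g. for a bootstrap secret:
#   token="$(secure_random 32)"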
function config-ip-firewall {
echo "Configuring IP firewall rules"
@ -51,18 +74,20 @@ function config-ip-firewall {
sysctl -w net.ipv4.conf.all.route_localnet=1
# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
# We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
iptables -A INPUT -w -p SCTP -j ACCEPT
fi
if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
iptables -A FORWARD -w -p SCTP -j ACCEPT
fi
# Flush iptables nat table
@ -568,6 +593,12 @@ EOF
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
@ -615,6 +646,15 @@ EOF
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
# Multimaster indicates that the cluster is HA.
# Currently the only HA clusters are regional.
# If we introduce zonal multimaster this will need to be revisited.
if [[ -n "${MULTIMASTER:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
@ -740,7 +780,7 @@ function create-master-audit-policy {
- group: "storage.k8s.io"'
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1beta1
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
@ -788,6 +828,13 @@ rules:
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
- level: None
users: ["cluster-autoscaler"]
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["configmaps", "endpoints"]
# Don't log HPA fetching metrics.
- level: None
users:
@ -1152,6 +1199,7 @@ function start-kubelet {
local -r kubelet_env_file="/etc/default/kubelet"
local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
@ -1217,10 +1265,12 @@ EOF
# Create the log file and set its properties.
#
# $1 is the file to create.
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file.
function prepare-log-file {
touch $1
chmod 644 $1
chown "${LOG_OWNER_USER:-root}":"${LOG_OWNER_GROUP:-root}" $1
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" $1
}
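# Illustrative call (the log file name is hypothetical): create a component log
# owned by the configured log user/group before its static pod writes to it:
#   prepare-log-file /var/log/kube-scheduler.log "${LOG_OWNER_USER:-root}" "${LOG_OWNER_GROUP:-root}"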
# Prepares parameters for kube-proxy manifest.
@ -1327,7 +1377,6 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
# Get default storage backend from manifest file.
@ -1460,8 +1509,12 @@ function start-kube-apiserver {
params+=" --allow-privileged=true"
params+=" --cloud-provider=gce"
params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}"
if [[ -z "${ETCD_SERVERS:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#http://127.0.0.1:4002}"
elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
fi
params+=" --secure-port=443"
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
@ -1517,39 +1570,15 @@ function start-kube-apiserver {
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
if [[ -n "${SERVICEACCOUNT_ISSUER:-}" ]]; then
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
params+=" --service-account-api-audiences=${SERVICEACCOUNT_API_AUDIENCES}"
fi
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-api-audiences=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
local audit_policy_config_mount=""
local audit_policy_config_volume=""
local audit_webhook_config_mount=""
local audit_webhook_config_volume=""
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
# Disable AdvancedAuditing enabled by default
if [[ -z "${FEATURE_GATES:-}" ]]; then
FEATURE_GATES="AdvancedAuditing=false"
else
FEATURE_GATES="${FEATURE_GATES},AdvancedAuditing=false"
fi
elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
local -r audit_policy_file="/etc/audit_policy.config"
params+=" --audit-policy-file=${audit_policy_file}"
# Create the audit policy file, and mount it into the apiserver pod.
@ -1597,8 +1626,6 @@ function start-kube-apiserver {
fi
fi
if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
params+=" --audit-webhook-mode=batch"
# Create the audit webhook config file, and mount it into the apiserver pod.
local -r audit_webhook_config_file="/etc/audit_webhook.config"
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
@ -1609,6 +1636,8 @@ function start-kube-apiserver {
# Batching parameters
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MODE:-}" ]]; then
params+=" --audit-webhook-mode=${ADVANCED_AUDIT_WEBHOOK_MODE}"
else
params+=" --audit-webhook-mode=batch"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"
@ -1735,46 +1764,27 @@ function start-kube-apiserver {
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
container_env+="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}"
fi
if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
if [[ -n "${container_env}" ]]; then
container_env="${container_env}, "
fi
container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\""
container_env+="{\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"}"
fi
if [[ -n "${container_env}" ]]; then
container_env="\"env\":[{${container_env}}],"
container_env="\"env\":[${container_env}],"
fi
if [[ -n "${ETCD_KMS_KEY_ID:-}" ]]; then
ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n'
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- kms:
name: grpc-kms-provider
cachesize: 1000
endpoint: unix:///var/run/kmsplugin/socket.sock
EOM
)
fi
local -r src_file="${src_dir}/kube-apiserver.manifest"
if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
ENCRYPTION_PROVIDER_CONFIG_PATH="${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml}"
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${ENCRYPTION_PROVIDER_CONFIG_PATH}"
params+=" --experimental-encryption-provider-config=${ENCRYPTION_PROVIDER_CONFIG_PATH}"
fi
# params is passed by reference, so no "$"
setup-etcd-encryption "${src_file}" params
src_file="${src_dir}/kube-apiserver.manifest"
# Evaluate variables.
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@ -1799,67 +1809,116 @@ EOM
sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
if [[ -z "${ETCD_KMS_KEY_ID:-}" ]]; then
# Removing KMS related placeholders.
sed -i -e " {
s@{{kms_plugin_container}}@@
cp "${src_file}" "${ETC_MANIFESTS:-/etc/kubernetes/manifests}"
}
s@{{kms_socket_mount}}@@
# Sets up etcd encryption.
# Configuration of etcd-level encryption consists of the following steps:
# 1. Writing the encryption provider config to disk
# 2. Adding the encryption-provider-config flag to kube-apiserver
# 3. Adding kms-socket-vol and kms-socket-vol-mnt to enable communication with kms-plugin (if requested)
#
# Expects parameters:
# $1 - path to kube-apiserver template
# $2 - kube-apiserver startup flags (must be passed by reference)
#
# Assumes vars (supplied via kube-env):
# ENCRYPTION_PROVIDER_CONFIG
# CLOUD_KMS_INTEGRATION
# ENCRYPTION_PROVIDER_CONFIG_PATH (will default to /etc/srv/kubernetes/encryption-provider-config.yml)
function setup-etcd-encryption {
local kube_apiserver_template_path
local -n kube_api_server_params
local default_encryption_provider_config_vol
local default_encryption_provider_config_vol_mnt
local encryption_provider_config_vol_mnt
local encryption_provider_config_vol
local default_kms_socket_dir
local default_kms_socket_vol_mnt
local default_kms_socket_vol
local kms_socket_vol_mnt
local kms_socket_vol
local encryption_provider_config_path
kube_apiserver_template_path="$1"
if [[ -z "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
sed -i -e " {
s@{{encryption_provider_mount}}@@
s@{{kms_socket_volume}}@@
s@{{encryption_provider_volume}}@@
} " "${src_file}"
else
local kms_plugin_src_file="${src_dir}/kms-plugin-container.manifest"
if [[ ! -f "${kms_plugin_src_file}" ]]; then
echo "Error: KMS Integration was requested, but "${kms_plugin_src_file}" is missing."
exit 1
fi
if [[ ! -f "${ENCRYPTION_PROVIDER_CONFIG_PATH}" ]]; then
echo "Error: KMS Integration was requested, but "${ENCRYPTION_PROVIDER_CONFIG_PATH}" is missing."
exit 1
fi
# TODO: Validate that the encryption config is for KMS.
local kms_socket_dir="/var/run/kmsplugin"
# kms_socket_mnt is used by both kms_plugin and kube-apiserver - this is how these containers talk.
local kms_socket_mnt="{ \"name\": \"kmssocket\", \"mountPath\": \"${kms_socket_dir}\", \"readOnly\": false}"
local kms_socket_vol="{ \"name\": \"kmssocket\", \"hostPath\": {\"path\": \"${kms_socket_dir}\", \"type\": \"DirectoryOrCreate\"}}"
local kms_path_to_socket="${kms_socket_dir}/socket.sock"
local encryption_provider_mnt="{ \"name\": \"encryptionconfig\", \"mountPath\": \"${ENCRYPTION_PROVIDER_CONFIG_PATH}\", \"readOnly\": true}"
local encryption_provider_vol="{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${ENCRYPTION_PROVIDER_CONFIG_PATH}\", \"type\": \"File\"}}"
# TODO these are used in other places, convert to global.
local gce_conf_path="/etc/gce.conf"
local cloud_config_mount="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}"
local kms_plugin_container=$(echo $(sed " {
s@{{kms_key_uri}}@${ETCD_KMS_KEY_ID}@
s@{{gce_conf_path}}@${gce_conf_path}@
s@{{kms_path_to_socket}}@${kms_path_to_socket}@
s@{{kms_socket_mount}}@${kms_socket_mnt}@
s@{{cloud_config_mount}}@${cloud_config_mount}@
} " "${kms_plugin_src_file}") | tr "\n" "\\n")
sed -i -e " {
s@{{kms_plugin_container}}@${kms_plugin_container},@
s@{{kms_socket_mount}}@${kms_socket_mnt},@
s@{{encryption_provider_mount}}@${encryption_provider_mnt},@
s@{{kms_socket_volume}}@${kms_socket_vol},@
s@{{encryption_provider_volume}}@${encryption_provider_vol},@
} " "${src_file}"
s@{{kms_socket_mount}}@@
s@{{kms_socket_volume}}@@
} " "${kube_apiserver_template_path}"
return
fi
cp "${src_file}" "${ETC_MANIFESTS:-/etc/kubernetes/manifests}"
kube_api_server_params="$2"
encryption_provider_config_path=${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml}
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}"
kube_api_server_params+=" --encryption-provider-config=${encryption_provider_config_path}"
default_encryption_provider_config_vol=$(echo "{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${encryption_provider_config_path}\", \"type\": \"File\"}}" | base64 | tr -d '\r\n')
default_encryption_provider_config_vol_mnt=$(echo "{ \"name\": \"encryptionconfig\", \"mountPath\": \"${encryption_provider_config_path}\", \"readOnly\": true}" | base64 | tr -d '\r\n')
encryption_provider_config_vol_mnt=$(echo "${ENCRYPTION_PROVIDER_CONFIG_VOL_MNT:-"${default_encryption_provider_config_vol_mnt}"}" | base64 --decode)
encryption_provider_config_vol=$(echo "${ENCRYPTION_PROVIDER_CONFIG_VOL:-"${default_encryption_provider_config_vol}"}" | base64 --decode)
sed -i -e " {
s@{{encryption_provider_mount}}@${encryption_provider_config_vol_mnt},@
s@{{encryption_provider_volume}}@${encryption_provider_config_vol},@
} " "${kube_apiserver_template_path}"
if [[ -n "${CLOUD_KMS_INTEGRATION:-}" ]]; then
default_kms_socket_dir="/var/run/kmsplugin"
default_kms_socket_vol_mnt=$(echo "{ \"name\": \"kmssocket\", \"mountPath\": \"${default_kms_socket_dir}\", \"readOnly\": false}" | base64 | tr -d '\r\n')
default_kms_socket_vol=$(echo "{ \"name\": \"kmssocket\", \"hostPath\": {\"path\": \"${default_kms_socket_dir}\", \"type\": \"DirectoryOrCreate\"}}" | base64 | tr -d '\r\n')
kms_socket_vol_mnt=$(echo "${KMS_PLUGIN_SOCKET_VOL_MNT:-"${default_kms_socket_vol_mnt}"}" | base64 --decode)
kms_socket_vol=$(echo "${KMS_PLUGIN_SOCKET_VOL:-"${default_kms_socket_vol}"}" | base64 --decode)
sed -i -e " {
s@{{kms_socket_mount}}@${kms_socket_vol_mnt},@
s@{{kms_socket_volume}}@${kms_socket_vol},@
} " "${kube_apiserver_template_path}"
else
sed -i -e " {
s@{{kms_socket_mount}}@@
s@{{kms_socket_volume}}@@
} " "${kube_apiserver_template_path}"
fi
}
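# Illustrative only (mirrors the inline default removed above): a minimal
# EncryptionConfig that kube-env could base64-encode into ENCRYPTION_PROVIDER_CONFIG;
# the endpoint matches the default kmssocket mount used when CLOUD_KMS_INTEGRATION is set.
#   kind: EncryptionConfig
#   apiVersion: v1
#   resources:
#   - resources:
#     - secrets
#     providers:
#     - kms:
#         name: grpc-kms-provider
#         cachesize: 1000
#         endpoint: unix:///var/run/kmsplugin/socket.sock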
# Applies encryption provider config.
# This function may be triggered in two scenarios:
# 1. Decryption of etcd
# 2. Encryption of etcd is added after the cluster is deployed
# Both cases require that the existing secrets in etcd be re-processed (rewritten).
#
# Assumes vars (supplied via kube-env):
# ENCRYPTION_PROVIDER_CONFIG_FORCE
function apply-encryption-config() {
if [[ "${ENCRYPTION_PROVIDER_CONFIG_FORCE:-false}" == "false" ]]; then
return
fi
# Wait for kube-apiserver to be ready.
until kubectl get secret; do
sleep ${ENCRYPTION_PROVIDER_CONFIG_FORCE_DELAY:-5}
done
retries=${ENCRYPTION_PROVIDER_CONFIG_FORCE_RETRIES:-5}
# The command below may fail when a conflict is detected during an update on a secret (something
# else updated the secret in the middle of our update).
# TODO: Retry only on errors caused by a conflict.
until (( retries == 0 )); do
# Force all secrets to be rewritten to etcd; in the process they are either encrypted or decrypted.
# https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/
if kubectl get secrets --all-namespaces -o json | kubectl replace -f -; then
break
fi
(( retries-- ))
sleep "${ENCRYPTION_PROVIDER_CONFIG_FORCE_RETRY_SLEEP:-3}"
done
}
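# Illustrative trigger (assumed kube-env settings): setting
#   ENCRYPTION_PROVIDER_CONFIG_FORCE=true
# in kube-env makes this function rewrite every secret once the apiserver is up,
# which is what forces re-encryption (or decryption) under the new provider config;
# the optional DELAY/RETRIES/RETRY_SLEEP variables above tune how persistently it retries.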
# Starts kubernetes controller manager.
@ -1944,7 +2003,6 @@ function start-kube-controller-manager {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -1957,6 +2015,7 @@ function start-kube-controller-manager {
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@ -1991,10 +2050,10 @@ function start-kube-scheduler {
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@ -2043,6 +2102,12 @@ function setup-addon-manifests {
copy-manifests "${psp_dir}" "${dst_dir}"
fi
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
local -r nth_dir="${src_dir}/${3:-$2}/node-termination-handler"
if [[ -d "${nth_dir}" ]]; then
copy-manifests "${nth_dir}" "${dst_dir}"
fi
fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
@ -2167,14 +2232,14 @@ function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# Update {{ container-runtime }} with actual container runtime name,
# and {{ container-runtime-endpoint }} with actual container runtime
# Update {{ fluentd_container_runtime_service }} with actual container runtime name,
# and {{ container_runtime_endpoint }} with actual container runtime
# endpoint.
function update-container-runtime {
local -r file="$1"
local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
sed -i \
-e "s@{{ *container_runtime *}}@${CONTAINER_RUNTIME_NAME:-docker}@g" \
-e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}@g" \
-e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
"${file}"
}
@ -2200,6 +2265,17 @@ function update-prometheus-to-sd-parameters {
fi
}
# Updates parameters in the yaml file for prometheus-to-sd configuration in daemon sets, or
# removes the component if it is disabled.
function update-daemon-set-prometheus-to-sd-parameters {
if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
update-prometheus-to-sd-parameters $1
fi
}
# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
@ -2232,6 +2308,7 @@ function setup-coredns-manifest {
function setup-fluentd {
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
@ -2244,9 +2321,12 @@ function setup-fluentd {
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
@ -2259,7 +2339,7 @@ function setup-kube-dns-manifest {
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$(echo "$CUSTOM_KUBE_DNS_YAML")
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
fi
@ -2274,6 +2354,16 @@ EOF
fi
}
# Sets up the manifests of the local DNS cache agent for k8s addons.
function setup-nodelocaldns-manifest {
setup-addon-manifests "addons" "dns/nodelocaldns"
local -r localdns_file="${dst_dir}/dns/nodelocaldns/nodelocaldns.yaml"
# Replace the placeholders with the configured DNS values.
sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
}
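# Illustrative values (assumed, not taken from this diff): with DNS_DOMAIN=cluster.local,
# DNS_SERVER_IP=10.0.0.10 and LOCAL_DNS_IP=169.254.20.10, the __PILLAR__ placeholders
# in nodelocaldns.yaml are rewritten to exactly those values.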
# Sets up the manifests of netd for k8s addons.
function setup-netd-manifest {
local -r netd_file="${dst_dir}/netd/netd.yaml"
@ -2282,7 +2372,7 @@ function setup-netd-manifest {
if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
# Replace with custom GCP netd deployment.
cat > "${netd_file}" <<EOF
$(echo "$CUSTOM_NETD_YAML")
$CUSTOM_NETD_YAML
EOF
fi
}
@ -2330,9 +2420,9 @@ function start-kube-addons {
if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
# Replace with custom GKE kube proxy.
cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$(echo "$CUSTOM_KUBE_PROXY_YAML")
$CUSTOM_KUBE_PROXY_YAML
EOF
update-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
fi
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
@ -2355,10 +2445,17 @@ EOF
base_eventer_memory="190Mi"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
nanny_memory="90Mi"
local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local heapster_min_cluster_size="16"
local metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
local -r eventer_memory_per_node="500"
local -r nanny_memory_per_node="200"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-100Mi}"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-10m}"
metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
heapster_min_cluster_size="5"
fi
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
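# Worked example (illustrative): with NUM_NODES=100, num_kube_nodes is 101 and
# nanny_memory becomes $((101 * 200 + 90 * 1024))Ki = 112360Ki.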
@ -2379,6 +2476,7 @@ EOF
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *heapster_min_cluster_size *}}@${heapster_min_cluster_size}@g" "${controller_yaml}"
update-prometheus-to-sd-parameters ${controller_yaml}
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]]; then
@ -2406,10 +2504,29 @@ EOF
fi
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
setup-addon-manifests "addons" "metrics-server"
base_metrics_server_cpu="40m"
base_metrics_server_memory="40Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="16"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_server_cpu="40m"
base_metrics_server_memory="35Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="5"
fi
local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
fi
if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
setup-node-termination-handler-manifest
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns/coredns"
@ -2418,6 +2535,9 @@ EOF
setup-addon-manifests "addons" "dns/kube-dns"
setup-kube-dns-manifest
fi
if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
setup-nodelocaldns-manifest
fi
fi
if [[ "${ENABLE_NETD:-}" == "true" ]]; then
setup-netd-manifest
@ -2465,13 +2585,16 @@ EOF
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
if [[ "${FEATURE_GATES:-}" =~ "AllAlpha=true" || "${FEATURE_GATES:-}" =~ "CSIDriverRegistry=true" || "${FEATURE_GATES:-}" =~ "CSINodeInfo=true" ]]; then
setup-addon-manifests "addons" "storage-crds"
fi
if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "ip-masq-agent"
fi
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-prometheus-to-sd-parameters ${metadata_proxy_yaml}
update-daemon-set-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
@ -2480,21 +2603,26 @@ EOF
setup-addon-manifests "addons" "istio/noauth"
fi
fi
if [[ "${FEATURE_GATES:-}" =~ "RuntimeClass=true" ]]; then
setup-addon-manifests "addons" "runtimeclass"
fi
if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
download-extra-addons
setup-addon-manifests "addons" "gce-extras"
fi
# Place addon manager pod manifest.
cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
src_file="${src_dir}/kube-addon-manager.yaml"
sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
local -r e2e_image_puller_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
update-container-runtime "${e2e_image_puller_manifest}"
cp "${e2e_image_puller_manifest}" /etc/kubernetes/manifests/
function setup-node-termination-handler-manifest {
local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml"
if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then
sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
fi
}
# Sets up manifests for the ingress controller and GCE-specific policies for the service controller.
@ -2523,16 +2651,6 @@ function start-lb-controller {
fi
}
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
@ -2638,6 +2756,21 @@ EOF
function main() {
echo "Start to configure instance for kubernetes"
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Resource requests of master components.
KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
@ -2667,9 +2800,9 @@ function main() {
fi
# generate the controller manager, scheduler and cluster autoscaler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
setup-os-params
config-ip-firewall
@ -2714,14 +2847,11 @@ function main() {
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
apply-encryption-config &
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi
@ -2732,9 +2862,6 @@ function main() {
echo "Done for the configuration for kubernetes"
}
# use --source-only to test functions defined in this script.
if [[ "$#" -eq 1 && "${1}" == "--source-only" ]]; then
:
else
main "${@}"
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "${@}"
fi

View File

@ -28,8 +28,8 @@ DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.5.0"
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
DEFAULT_CRICTL_VERSION="v1.11.0"
DEFAULT_CRICTL_SHA1="8f5142b985d314cdebb51afd55054d5ec00c442a"
DEFAULT_CRICTL_VERSION="v1.12.0"
DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###
@ -247,6 +247,11 @@ function install-crictl {
fi
local -r crictl="crictl-${crictl_version}-linux-amd64"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
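# For example (assumed, not part of this diff), a containerd node would typically set
# CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock before this point,
# so the generated /etc/crictl.yaml targets containerd instead of dockershim.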
if is-preloaded "${crictl}" "${crictl_sha1}"; then
echo "crictl is preloaded"
return
@ -257,11 +262,6 @@ function install-crictl {
download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
chmod a+x "${KUBE_BIN}/crictl"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
}
function install-exec-auth-plugin {
@ -275,6 +275,14 @@ function install-exec-auth-plugin {
download-or-bust "${plugin_sha1}" "${plugin_url}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
if [[ ! "${EXEC_AUTH_PLUGIN_LICENSE_URL:-}" ]]; then
return
fi
local -r license_url="${EXEC_AUTH_PLUGIN_LICENSE_URL}"
echo "Downloading gke-exec-auth-plugin license"
download-or-bust "" "${license_url}"
mv "${KUBE_HOME}/LICENSE" "${KUBE_BIN}/gke-exec-auth-plugin-license"
}
function install-kube-manifests {
@ -421,6 +429,7 @@ function install-kube-binary-config {
install-crictl
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
# TODO(awly): include the binary and license in the OS image.
install-exec-auth-plugin
fi

View File

@ -125,7 +125,7 @@ func (c *ManifestTestCase) mustCreateEnv(envTemplate string, env interface{}) {
func (c *ManifestTestCase) mustInvokeFunc(envTemplate string, env interface{}) {
c.mustCreateEnv(envTemplate, env)
args := fmt.Sprintf("source %s ; source %s --source-only ; %s", c.envScriptPath, configureHelperScriptName, c.manifestFuncName)
args := fmt.Sprintf("source %s ; source %s; %s", c.envScriptPath, configureHelperScriptName, c.manifestFuncName)
cmd := exec.Command("bash", "-c", args)
bs, err := cmd.CombinedOutput()
@ -143,7 +143,7 @@ func (c *ManifestTestCase) mustLoadPodFromManifest() {
}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &c.pod); err != nil {
c.t.Fatalf("Failed to decode manifest: %v", err)
c.t.Fatalf("Failed to decode manifest:\n%s\nerror: %v", json, err)
}
}

View File

@ -83,9 +83,6 @@ function create-master-instance-internal() {
retries=30
sleep_sec=60
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"

Some files were not shown because too many files have changed in this diff