vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
filegroup(
name = "addon-srcs",
@ -16,10 +16,10 @@ filegroup(
pkg_tar(
name = "addons",
extension = "tar.gz",
files = [
srcs = [
":addon-srcs",
],
extension = "tar.gz",
mode = "0644",
strip_prefix = ".",
)

View File

@ -1,6 +1,13 @@
### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
- Allow the reconcile/ensure loop to work with resources under non-kube-system namespaces.
- Update kubectl to v1.9.3.
### Version 8.4 (Thu November 30 2017 zou nengren @zouyee)
- Update kubectl to v1.8.4.
### Version 6.5 (Wed October 15 2017 Daniel Kłobuszewski <danielmk@google.com>)
- Support for HA masters.
### Version 6.4-beta.2 (Mon June 12 2017 Jeff Grafton <jgrafton@google.com>)
- Update kubectl to v1.6.4.
- Refresh base images.

View File

@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGE=gcr.io/google-containers/kube-addon-manager
IMAGE=staging-k8s.gcr.io/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.4
KUBECTL_VERSION?=v1.8.4
VERSION=v8.6
KUBECTL_VERSION?=v1.9.3
ifeq ($(ARCH),amd64)
BASEIMAGE?=bashell/alpine-bash
@ -46,12 +46,12 @@ build:
docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)
push: build
gcloud docker -- push $(IMAGE)-$(ARCH):$(VERSION)
docker push $(IMAGE)-$(ARCH):$(VERSION)
ifeq ($(ARCH),amd64)
# Backward compatibility. TODO: deprecate this image tag
docker rmi $(IMAGE):$(VERSION) 2>/dev/null || true
docker tag $(IMAGE)-$(ARCH):$(VERSION) $(IMAGE):$(VERSION)
gcloud docker -- push $(IMAGE):$(VERSION)
docker push $(IMAGE):$(VERSION)
endif
clean:

View File

@ -0,0 +1,2 @@
reviewers:
- mrhohn

View File

@ -21,7 +21,6 @@ In future release (after one year), Addon Manager may not respect it anymore. Addons
have this label but without `addonmanager.kubernetes.io/mode=EnsureExists` will be
treated as "reconcile class addons" for now.
- Resources under `$ADDON_PATH` need to have either one of these two labels.
Meanwhile namespaced resources need to be in `kube-system` namespace.
Otherwise it will be omitted.
- The above label and namespace rule does not stand for `/opt/namespace.yaml` and
resources under `/etc/kubernetes/admission-controls/`. addon-manager will attempt to
@ -40,20 +39,20 @@ The `addon-manager` is built for multiple architectures.
```console
# Build for linux/amd64 (default)
$ make push ARCH=amd64
# ---> gcr.io/google-containers/kube-addon-manager-amd64:VERSION
# ---> gcr.io/google-containers/kube-addon-manager:VERSION (image with backwards-compatible naming)
# ---> staging-k8s.gcr.io/kube-addon-manager-amd64:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager:VERSION (image with backwards-compatible naming)
$ make push ARCH=arm
# ---> gcr.io/google-containers/kube-addon-manager-arm:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-arm:VERSION
$ make push ARCH=arm64
# ---> gcr.io/google-containers/kube-addon-manager-arm64:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-arm64:VERSION
$ make push ARCH=ppc64le
# ---> gcr.io/google-containers/kube-addon-manager-ppc64le:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-ppc64le:VERSION
$ make push ARCH=s390x
# ---> gcr.io/google-containers/kube-addon-manager-s390x:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager-s390x:VERSION
```
If you don't want to push the images, run `make` or `make build` instead
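
A hedged build-only example, mirroring the push targets above (no registry access needed):

```console
$ make build ARCH=arm64
# ---> staging-k8s.gcr.io/kube-addon-manager-arm64:VERSION (built locally, not pushed)
```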

View File

@ -124,12 +124,12 @@ function reconcile_addons() {
# Filter out the `configured` message to avoid noisy logging.
# `created`, `pruned` and errors will be logged.
log INFO "== Reconciling with deprecated label =="
${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
--prune=true --recursive | grep -v configured
log INFO "== Reconciling with addon-manager label =="
${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
--prune=true --recursive | grep -v configured
@ -139,7 +139,7 @@ function reconcile_addons() {
function ensure_addons() {
# Creating objects that already exist should fail.
# Filter out the `AlreadyExists` message to avoid noisy logging.
${KUBECTL} ${KUBECTL_OPTS} create --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
${KUBECTL} ${KUBECTL_OPTS} create -f ${ADDON_PATH} \
-l ${ADDON_MANAGER_LABEL}=EnsureExists --recursive 2>&1 | grep -v AlreadyExists
log INFO "== Kubernetes addon ensure completed at $(date -Is) =="
@ -155,7 +155,7 @@ function is_leader() {
fi
KUBE_CONTROLLER_MANAGER_LEADER=`${KUBECTL} -n kube-system get ep kube-controller-manager \
-o go-template=$'{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
| sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/'`
| sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}'`
# If there was any problem with getting the leader election results, var will
# be empty. Since it's better to have multiple addon managers than no addon
# managers at all, we're going to assume that we're the leader in such case.
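
The added `awk` stage matters because kube-controller-manager records its leader as
`<hostname>_<uuid>`, and only the hostname half is comparable with this node's name.
A minimal sketch with an illustrative annotation value:

```console
$ echo '{"holderIdentity":"kube-master_0a1b2c3d","leaseDurationSeconds":15}' \
    | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' \
    | awk -F'_' '{print $1}'
kube-master
```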

View File

@ -20,6 +20,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
nodeSelector:
projectcalico.org/ds-ready: "true"
hostNetwork: true
@ -32,7 +33,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v2.6.1
image: gcr.io/projectcalico-org/node:v2.6.7
env:
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
@ -86,7 +87,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v1.11.0
image: gcr.io/projectcalico-org/cni:v1.11.2
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
@ -149,5 +150,10 @@ spec:
hostPath:
path: /etc/cni/net.d
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
# Make sure calico/node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
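
The `priorityClassName` values introduced in these manifests refer to the two built-in
priority classes; a hedged way to inspect them, assuming the PriorityClass API is
enabled on the cluster (output abridged, values are the upstream defaults):

```console
$ kubectl get priorityclasses
NAME                      VALUE
system-cluster-critical   2000000000
system-node-critical      2000001000
```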

View File

@ -16,8 +16,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
- image: k8s.gcr.io/cpvpa-amd64:v0.6.0
name: autoscaler
command:
- /cpvpa

View File

@ -16,13 +16,14 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
hostNetwork: true
serviceAccountName: calico
containers:
- image: calico/typha:v0.5.1
- image: gcr.io/projectcalico-org/typha:v0.5.6
name: calico-typha
ports:
- containerPort: 5473

View File

@ -16,8 +16,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2
name: autoscaler
command:
- /cluster-proportional-autoscaler

View File

@ -16,8 +16,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
- image: k8s.gcr.io/cpvpa-amd64:v0.6.0
name: autoscaler
command:
- /cpvpa

View File

@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: l7-default-backend
@ -24,7 +24,7 @@ spec:
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.3
image: k8s.gcr.io/defaultbackend:1.4
livenessProbe:
httpGet:
path: /healthz

View File

@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -70,8 +57,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -84,13 +72,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:
@ -101,7 +89,7 @@ spec:
memory: {{ nanny_memory }}
volumeMounts:
- name: heapster-config-volume
mountMath: /etc/config
mountPath: /etc/config
env:
- name: MY_POD_NAME
valueFrom:
@ -123,7 +111,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: eventer-nanny
resources:
limits:
@ -143,7 +131,7 @@ spec:
fieldPath: metadata.namespace
volumeMounts:
- name: eventer-config-volume
mountMath: /etc/config
mountPath: /etc/config
command:
- /pod_nanny
- --config-dir=/etc/config
@ -160,7 +148,6 @@ spec:
- name: heapster-config-volume
configMap:
name: heapster-config
volumes:
- name: eventer-config-volume
configMap:
name: eventer-config

View File

@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -70,9 +57,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -86,13 +73,13 @@ spec:
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --sink=gcm:?metrics=autoscaling
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:
@ -125,7 +112,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: eventer-nanny
resources:
limits:

View File

@ -1,16 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5|float -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -70,8 +57,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -84,13 +72,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:
@ -123,7 +111,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: eventer-nanny
resources:
limits:

View File

@ -22,6 +22,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@ -29,7 +30,7 @@ spec:
operator: "Exists"
containers:
- name: influxdb
image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3
image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3
resources:
limits:
cpu: 100m
@ -46,7 +47,7 @@ spec:
- name: influxdb-persistent-storage
mountPath: /data
- name: grafana
image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3
image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3
env:
resources:
# keep request = limit to keep this container in guaranteed class
@ -71,7 +72,7 @@ spec:
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
value: Admin
- name: GF_SERVER_ROOT_URL
value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/
ports:
- name: ui
containerPort: 3000
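
The `GF_SERVER_ROOT_URL` change tracks the newer apiserver proxy path (the old
`/api/v1/proxy/...` form is deprecated); a hedged smoke test against a live cluster:

```console
$ kubectl get --raw /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/ | head
```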

View File

@ -1,14 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -55,8 +44,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -71,7 +61,7 @@ spec:
- --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110
# BEGIN_PROMETHEUS_TO_SD
- name: prom-to-sd
image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
@ -89,7 +79,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
# END_PROMETHEUS_TO_SD
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:

View File

@ -1,14 +1,3 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -55,8 +44,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0
- image: k8s.gcr.io/heapster-amd64:v1.5.0
name: heapster
livenessProbe:
httpGet:
@ -68,7 +58,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.1
name: heapster-nanny
resources:
limits:

View File

@ -0,0 +1,6 @@
approvers:
- floreks
- maciaszczykm
reviewers:
- floreks
- maciaszczykm

View File

@ -7,7 +7,7 @@ metadata:
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
@ -27,9 +27,10 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
resources:
limits:
cpu: 100m

View File

@ -7,10 +7,6 @@ metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
@ -26,6 +22,10 @@ rules:
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
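
The relocated rule grants `get` on the `services/proxy` subresource instead of the raw
`proxy` verb; the scheme-qualified names (`http:heapster:`) cover port- and
scheme-specific proxying. A hedged example of the kind of request this authorizes for
the Dashboard's service account:

```console
$ kubectl get --raw /api/v1/namespaces/kube-system/services/http:heapster:/proxy/healthz
```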

View File

@ -8,3 +8,14 @@ metadata:
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
# Allows editing resource and makes sure it is created first.
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque

View File

@ -36,7 +36,7 @@ spec:
hostPath:
path: /dev
containers:
- image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:0e79da6998a61257585e0d3fb5848240129f0fa5b4ad972dfed4049448093c33"
- image: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
name: nvidia-gpu-device-plugin
resources:

View File

@ -58,7 +58,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns-autoscaler
@ -68,6 +68,9 @@ metadata:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: kube-dns-autoscaler
template:
metadata:
labels:
@ -75,9 +78,10 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
resources:
requests:
cpu: "20m"

View File

@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
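
Note that `prometheus :9153` still serves metrics on each pod even though the
container and Service port entries were dropped; a hedged spot-check from inside the
cluster (pod IP illustrative):

```console
$ curl -s http://10.32.0.12:9153/metrics | grep '^coredns_' | head
```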

View File

@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@ -18,9 +18,10 @@ metadata:
labels:
k8s-app: etcd-empty-dir-cleanup
spec:
priorityClassName: system-node-critical
serviceAccountName: etcd-empty-dir-cleanup
hostNetwork: true
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: gcr.io/google-containers/etcd-empty-dir-cleanup:3.0.14.0
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.1.10.0

View File

@ -8,7 +8,7 @@ is a graphical interface for viewing and querying the logs stored in
Elasticsearch.
**Note:** this addon should **not** be used as-is in production. This is
an example and you should treat is as such. Please see at least the
an example and you should treat it as such. Please see at least the
[Security](#security) and the [Storage](#storage) sections for more
information.
@ -19,9 +19,9 @@ a Deployment, but allows for maintaining state on storage volumes.
### Security
Elasticsearch has capabilities to enable authorization using
Elasticsearch has capabilities to enable authorization using the
[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
in Elasticsearch and Kibana configurations. It can also be set via
in Elasticsearch and Kibana configurations. It can also be set via the
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
follow [official documentation][setupCreds] to set up credentials in
Elasticsearch and Kibana. Don't forget to propagate those credentials also to
@ -31,7 +31,7 @@ and [Secrets][secret] to store credentials in the Kubernetes apiserver.
### Initialization
The Elasticsearch Statefulset manifest specifies that there shall be an
The Elasticsearch StatefulSet manifest specifies that there shall be an
[init container][initContainer] executing before Elasticsearch containers
themselves, in order to ensure that the kernel state variable
`vm.max_map_count` is at least 262144, since this is a requirement of
@ -61,7 +61,7 @@ Learn more in the [official Kubernetes documentation][k8sElasticsearchDocs].
Since Fluentd talks to the Elasticsearch service inside the cluster, instances
on masters won't work, because masters have no kube-proxy. Don't mark masters
with a label mentioned in the previous paragraph or add a taint on them to
with the label mentioned in the previous paragraph or add a taint on them to
avoid Fluentd pods scheduling there.
[fluentd]: http://www.fluentd.org/
@ -71,7 +71,7 @@ avoid Fluentd pods scheduling there.
[setupCreds]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords
[fluentdCreds]: https://github.com/uken/fluent-plugin-elasticsearch#user-password-path-scheme-ssl_verify
[fluentdEnvVar]: https://docs.fluentd.org/v0.12/articles/faq#how-can-i-use-environment-variables-to-configure-parameters-dynamically
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configmap/
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
[secret]: https://kubernetes.io/docs/concepts/configuration/secret/
[statefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset
[initContainer]: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
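
The init-container requirement described above reduces to a single kernel setting; a
minimal sketch of what it enforces on each node (run as root):

```console
$ sysctl -w vm.max_map_count=262144
vm.max_map_count = 262144
```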

View File

@ -8,8 +8,7 @@ load(
go_binary(
name = "es-image",
importpath = "k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image",
library = ":go_default_library",
embed = [":go_default_library"],
)
go_library(

View File

@ -14,7 +14,7 @@
.PHONY: binary build push
PREFIX = gcr.io/google-containers
PREFIX = staging-k8s.gcr.io
IMAGE = elasticsearch
TAG = v5.6.4
@ -22,7 +22,7 @@ build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
push:
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
docker push $(PREFIX)/$(IMAGE):$(TAG)
binary:
CGO_ENABLED=0 GOOS=linux go build -a -ldflags "-w" elasticsearch_logging_discovery.go

View File

@ -86,7 +86,7 @@ func main() {
serviceName = "elasticsearch-logging"
}
// Look for endpoints associated with the Elasticsearch loggging service.
// Look for endpoints associated with the Elasticsearch logging service.
// First wait for the service to become available.
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
elasticsearch, err = client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})

View File

@ -47,7 +47,7 @@ roleRef:
apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging
@ -73,7 +73,7 @@ spec:
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: gcr.io/google-containers/elasticsearch:v5.6.4
- image: k8s.gcr.io/elasticsearch:v5.6.4
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class

View File

@ -1,6 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-es-config-v0.1.4
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
@ -101,39 +111,46 @@ data:
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
type tail
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag kubernetes.*
tag raw.kubernetes.*
format json
read_from_head true
format multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
@ -143,8 +160,10 @@ data:
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type tail
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
@ -154,7 +173,8 @@ data:
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@ -170,7 +190,8 @@ data:
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -184,7 +205,8 @@ data:
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -198,7 +220,8 @@ data:
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -212,7 +235,8 @@ data:
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -226,7 +250,8 @@ data:
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -240,7 +265,8 @@ data:
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -254,7 +280,8 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -268,7 +295,8 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -280,34 +308,61 @@ data:
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type systemd
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
<storage>
@type local
persistent true
</storage>
read_from_head true
tag docker
</source>
<source>
type systemd
@id journald-container-runtime
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag container-runtime
</source>
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
<storage>
@type local
persistent true
</storage>
read_from_head true
tag kubelet
</source>
<source>
type systemd
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
pos_file /var/log/gcp-journald-node-problem-detector.pos
<storage>
@type local
persistent true
</storage>
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
type forward
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
@ -342,32 +397,32 @@ data:
host ${hostname}
</labels>
</source>
output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
type kubernetes_metadata
@type kubernetes_metadata
</filter>
<match **>
type elasticsearch
log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limits.
buffer_chunk_limit 2M
buffer_queue_limit 8
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>
metadata:
name: fluentd-es-config-v0.1.1
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
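
The ConfigMap above migrates the sources and outputs to fluentd v1-style `@type`/`@id`
directives and a file-backed `<buffer>` section; a hedged syntax check before rollout
(paths illustrative):

```console
$ fluentd --dry-run -c /etc/fluent/fluent.conf
```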

View File

@ -45,37 +45,38 @@ roleRef:
name: fluentd-es
apiGroup: ""
---
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.0.2
name: fluentd-es-v2.0.4
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.2
version: v2.0.4
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.0.2
version: v2.0.4
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v2.0.2
version: v2.0.4
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2
image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
@ -112,4 +113,4 @@ spec:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-es-config-v0.1.1
name: fluentd-es-config-v0.1.4

View File

@ -21,6 +21,8 @@
FROM debian:stretch-slim
ARG DEBIAN_FRONTEND=noninteractive
COPY clean-apt /usr/bin
COPY clean-install /usr/bin
COPY Gemfile /Gemfile
@ -29,7 +31,7 @@ COPY Gemfile /Gemfile
# 2. Install fluentd via ruby.
# 3. Remove build dependencies.
# 4. Cleanup leftover caches & files.
RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev" \
RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev libffi-dev" \
&& clean-install $BUILD_DEPS \
ca-certificates \
libjemalloc1 \

View File

@ -1,10 +1,11 @@
source 'https://rubygems.org'
gem 'fluentd', '~>0.12.32'
gem 'activesupport', '~>4.2.6'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>0.27.0'
gem 'fluent-plugin-elasticsearch', '~>1.9.5'
gem 'fluent-plugin-systemd', '~>0.0.8'
gem 'fluentd', '<=1.1.0'
gem 'activesupport', '~>5.1.4'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
gem 'fluent-plugin-elasticsearch', '~>2.4.1'
gem 'fluent-plugin-systemd', '~>0.3.1'
gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'fluent-plugin-multi-format-parser', '~>0.1.1'
gem 'oj', '~>2.18.1'
gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
gem 'oj', '~>3.3.1.0'

View File

@ -14,12 +14,12 @@
.PHONY: build push
PREFIX = gcr.io/google-containers
PREFIX = staging-k8s.gcr.io
IMAGE = fluentd-elasticsearch
TAG = v2.0.2
TAG = v2.0.4
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
push:
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
docker push $(PREFIX)/$(IMAGE):$(TAG)

View File

@ -4,11 +4,11 @@ that collects Docker container log files using [Fluentd][fluentd]
and sends them to an instance of [Elasticsearch][elasticsearch].
This image is designed to be used as part of the [Kubernetes][kubernetes]
cluster bring up process. The image resides at GCR under the name
[gcr.io/google-containers/fluentd-elasticsearch][image].
[k8s.gcr.io/fluentd-elasticsearch][image].
[fluentd]: http://www.fluentd.org/
[elasticsearch]: https://www.elastic.co/products/elasticsearch
[kubernetes]: https://kubernetes.io
[image]: https://gcr.io/google-containers/fluentd-elasticsearch
[image]: https://k8s.gcr.io/fluentd-elasticsearch
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md?pixel)]()

View File

@ -2,7 +2,7 @@
# Do not collect fluentd's own logs to avoid infinite loops.
<match fluent.**>
type null
@type null
</match>
@include /etc/fluent/config.d/*.conf

View File

@ -1,4 +1,4 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@ -30,7 +30,7 @@ spec:
- name: ELASTICSEARCH_URL
value: http://elasticsearch-logging:9200
- name: SERVER_BASEPATH
value: /api/v1/proxy/namespaces/kube-system/services/kibana-logging
value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED

View File

@ -8,4 +8,71 @@ they can be searched, viewed, and analyzed.
Learn more at: https://kubernetes.io/docs/tasks/debug-application-cluster/logging-stackdriver
## Troubleshooting
In Kubernetes clusters of version 1.10.0 or later, the fluentd-gcp DaemonSet can be
manually scaled. This is useful e.g. when applications running in the cluster
are sending a large volume of logs (i.e. over 100kB/s), causing fluentd-gcp to
fail with OutOfMemory errors. Conversely, if the applications aren't generating
a lot of logs, it may be useful to reduce the amount of resources consumed by
fluentd-gcp, making these resources available to other applications. To learn
more about Kubernetes resource requests and limits, see the official
documentation ([CPU][cpu], [memory][memory]). The amount of resources requested
by fluentd-gcp on every node in the cluster can be fetched by running the
following command:
```
$ kubectl get ds -n kube-system -l k8s-app=fluentd-gcp \
-o custom-columns=NAME:.metadata.name,\
CPU_REQUEST:.spec.template.spec.containers[].resources.requests.cpu,\
MEMORY_REQUEST:.spec.template.spec.containers[].resources.requests.memory,\
MEMORY_LIMIT:.spec.template.spec.containers[].resources.limits.memory
```
This will display an output similar to the following:
```
NAME CPU_REQUEST MEMORY_REQUEST MEMORY_LIMIT
fluentd-gcp-v2.0.15 100m 200Mi 300Mi
```
In order to change those values, a [ScalingPolicy][scalingPolicy] needs to be
defined. Currently, only base values are supported (no automatic scaling). The
ScalingPolicy can be created using kubectl. E.g. to set cpu request to 101m,
memory request to 150Mi and memory limit to 400Mi:
```
$ cat <<EOF | kubectl apply -f -
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
name: fluentd-gcp-scaling-policy
namespace: kube-system
spec:
containers:
- name: fluentd-gcp
resources:
requests:
- resource: cpu
base: 101m
- resource: memory
base: 150Mi
limits:
- resource: memory
base: 400Mi
EOF
```
To remove the override and go back to GKE-provided defaults, it is enough to
just remove the ScalingPolicy:
```
$ kubectl delete -n kube-system scalingpolicies.scalingpolicy.kope.io/fluentd-gcp-scaling-policy
```
[cpu]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
[memory]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
[scalingPolicy]: https://github.com/justinsb/scaler
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-gcp/README.md?pixel)]()

View File

@ -29,11 +29,11 @@ subjects:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: event-exporter-v0.1.7
name: event-exporter-v0.1.8
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.1.7
version: v0.1.8
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@ -42,17 +42,18 @@ spec:
metadata:
labels:
k8s-app: event-exporter
version: v0.1.7
version: v0.1.8
spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: gcr.io/google-containers/event-exporter:v0.1.7
image: k8s.gcr.io/event-exporter:v0.1.8
command:
- '/event-exporter'
- /event-exporter
- -sink-opts="-location={{ event_exporter_zone }}"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons

View File

@ -46,33 +46,42 @@ data:
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
type tail
@type tail
path /var/log/containers/*.log
pos_file /var/log/gcp-containers.log.pos
tag reform.*
read_from_head true
format multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
format none
</source>
<filter reform.**>
type parser
@type parser
key_name message
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</filter>
<filter reform.**>
@type parser
format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
reserve_data true
suppress_parse_error_log true
emit_invalid_record_to_error false
key_name log
</filter>
<match reform.**>
type record_reformer
@type record_reformer
enable_ruby true
tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
</match>
@ -89,21 +98,10 @@ data:
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/gcp-salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/gcp-startupscript.log.pos
@ -113,8 +111,9 @@ data:
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type tail
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/gcp-docker.log.pos
@ -124,7 +123,7 @@ data:
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@ -140,7 +139,7 @@ data:
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -154,7 +153,7 @@ data:
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -168,7 +167,7 @@ data:
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -182,7 +181,7 @@ data:
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -196,7 +195,7 @@ data:
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -210,7 +209,7 @@ data:
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -224,7 +223,7 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -238,7 +237,7 @@ data:
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -250,8 +249,10 @@ data:
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Keep this for compatibility, remove this after
# cri container runtime rolls out.
<source>
type systemd
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
read_from_head true
@ -259,7 +260,15 @@ data:
</source>
<source>
type systemd
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
pos_file /var/log/gcp-journald-container-runtime.pos
read_from_head true
tag container-runtime
</source>
<source>
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
read_from_head true
@ -267,23 +276,13 @@ data:
</source>
<source>
type systemd
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
pos_file /var/log/gcp-journald-node-problem-detector.pos
read_from_head true
tag node-problem-detector
</source>
monitoring.conf: |-
# Prometheus monitoring
<source>
@type prometheus
port 31337
</source>
<source>
@type prometheus_monitor
</source>
# This source is used to acquire approximate process start timestamp,
# which purpose is explained before the corresponding output plugin.
<source>
@ -356,6 +355,8 @@ data:
# Collect metrics in Prometheus registry about plugin activity.
enable_monitoring true
monitoring_type prometheus
# Allow log entries from multiple containers to be sent in the same request.
split_logs_by_tag false
# Set the buffer type to file to improve the reliability and reduce the memory consumption
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
@ -376,6 +377,11 @@ data:
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
labels {
# The logging backend will take responsibility for double writing to
# the necessary resource types when this label is set.
"logging.googleapis.com/k8s_compatibility": "true"
}
</match>
# Keep a smaller buffer here since these logs are less important than the user's
@ -386,6 +392,8 @@ data:
detect_json true
enable_monitoring true
monitoring_type prometheus
# Allow entries from multiple system logs to be sent in the same request.
split_logs_by_tag false
detect_subservice false
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
@ -396,9 +404,14 @@ data:
max_retry_wait 30
disable_retry_limit
num_threads 2
labels {
# The logging backend will take responsibility for double writing to
# the necessary resource types when this label is set.
"logging.googleapis.com/k8s_compatibility": "true"
}
</match>
metadata:
name: fluentd-gcp-config-v1.2.3
name: fluentd-gcp-config-v1.2.4
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File

@ -1,13 +1,13 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v2.0.10
name: fluentd-gcp-v3.0.0
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v2.0.10
version: v3.0.0
spec:
updateStrategy:
type: RollingUpdate
@ -16,27 +16,19 @@ spec:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v2.0.10
version: v3.0.0
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-gcp
dnsPolicy: Default
containers:
- name: fluentd-gcp
image: gcr.io/google-containers/fluentd-gcp:2.0.10
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
resources:
limits:
memory: 300Mi
requests:
cpu: 100m
memory: 200Mi
image: gcr.io/stackdriver-agents/stackdriver-logging-agent:{{ fluentd_gcp_version }}
volumeMounts:
- name: varlog
mountPath: /var/log
@ -47,7 +39,7 @@ spec:
mountPath: /host/lib
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
mountPath: /etc/google-fluentd/config.d
# Liveness probe is aimed to help in situations where fluentd
# silently hangs for no apparent reason until manual restart.
# The idea of this probe is that if fluentd is not queueing or
@ -82,12 +74,12 @@ spec:
fi;
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
- --api-override={{ prometheus_to_sd_endpoint }}
- --source=fluentd:http://localhost:31337?whitelisted=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
- --source=fluentd:http://localhost:24231?whitelisted=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
env:
@ -122,4 +114,4 @@ spec:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-gcp-config-v1.2.3
name: fluentd-gcp-config-v1.2.4

View File

@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fluentd-gcp-scaler
namespace: kube-system
labels:
k8s-app: fluentd-gcp-scaler
version: v0.1.0
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-gcp-scaler
template:
metadata:
labels:
k8s-app: fluentd-gcp-scaler
spec:
serviceAccountName: fluentd-gcp-scaler
containers:
- name: fluentd-gcp-scaler
image: gcr.io/google-containers/fluentd-gcp-scaler:0.1
command:
- /scaler.sh
- --ds-name=fluentd-gcp-v3.0.0
- --scaling-policy=fluentd-gcp-scaling-policy
env:
# Defaults, used if no overrides are found in fluentd-gcp-scaling-policy
- name: CPU_REQUEST
value: 100m
- name: MEMORY_REQUEST
value: 200Mi
- name: MEMORY_LIMIT
value: 300Mi

View File

@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: scalingpolicies.scalingpolicy.kope.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: scalingpolicy.kope.io
version: v1alpha1
names:
kind: ScalingPolicy
plural: scalingpolicies
scope: Namespaced

View File

@ -0,0 +1,48 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd-gcp-scaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: system:fluentd-gcp-scaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- "extensions"
resources:
- daemonsets
verbs:
- get
- patch
- apiGroups:
- "scalingpolicy.kope.io"
resources:
- scalingpolicies
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: fluentd-gcp-scaler-binding
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system:fluentd-gcp-scaler
subjects:
- kind: ServiceAccount
name: fluentd-gcp-scaler
namespace: kube-system

View File

@ -24,11 +24,12 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: ip-masq-agent
hostNetwork: true
containers:
- name: ip-masq-agent
image: gcr.io/google-containers/ip-masq-agent-amd64:v2.0.2
image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2
resources:
requests:
cpu: 10m

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,34 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metadata-agent
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
- "apps"
- "extensions"
resources:
- "*"
verbs:
- watch
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metadata-agent
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metadata-agent
subjects:
- kind: ServiceAccount
name: metadata-agent
namespace: kube-system

View File

@ -1,23 +1,33 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: metadata-agent
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
labels:
app: stackdriver-agents
app: metadata-agent
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: stackdriver-agents
name: metadata-agent
namespace: kube-system
spec:
selector:
matchLabels:
app: stackdriver-agents
app: metadata-agent
template:
metadata:
labels:
app: stackdriver-agents
app: metadata-agent
spec:
serviceAccountName: metadata-agent
containers:
- image: us.gcr.io/container-monitoring-storage/stackdriver-metadata-agent:{{ metadata_agent_version }}
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:{{ metadata_agent_version }}
imagePullPolicy: IfNotPresent
name: metadata-agent
ports:

View File

@ -33,24 +33,34 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
serviceAccountName: metadata-proxy
hostNetwork: true
dnsPolicy: Default
containers:
- name: metadata-proxy
image: gcr.io/google_containers/metadata-proxy:v0.1.5
image: k8s.gcr.io/metadata-proxy:v0.1.9
securityContext:
privileged: true
# Request and limit resources to get guaranteed QoS.
resources:
requests:
memory: "32Mi"
memory: "25Mi"
cpu: "30m"
limits:
memory: "32Mi"
memory: "25Mi"
cpu: "30m"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: gcr.io/google_containers/prometheus-to-sd:v0.2.2
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
# Request and limit resources to get guaranteed QoS.
resources:
requests:
memory: "20Mi"
cpu: "2m"
limits:
memory: "20Mi"
cpu: "2m"
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons

View File

@ -1,6 +1,8 @@
approvers:
- DirectXMan12
- kawych
- piosz
reviewers:
- DirectXMan12
- kawych
- piosz

View File

@ -23,31 +23,32 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server-v0.2.0
name: metrics-server-v0.2.1
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.2.0
version: v0.2.1
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.2.0
version: v0.2.1
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.2.0
version: v0.2.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: gcr.io/google_containers/metrics-server-amd64:v0.2.0
image: k8s.gcr.io/metrics-server-amd64:v0.2.1
command:
- /metrics-server
- --source=kubernetes.summary_api:''
@ -56,7 +57,7 @@ spec:
name: https
protocol: TCP
- name: metrics-server-nanny
image: gcr.io/google_containers/addon-resizer:1.8.1
image: k8s.gcr.io/addon-resizer:1.8.1
resources:
limits:
cpu: 100m
@ -81,10 +82,10 @@ spec:
- --config-dir=/etc/config
- --cpu=40m
- --extra-cpu=0.5m
- --memory=140Mi
- --memory=40Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=metrics-server-v0.2.0
- --deployment=metrics-server-v0.2.1
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential

View File

@ -43,7 +43,7 @@ spec:
spec:
containers:
- name: node-problem-detector
image: gcr.io/google_containers/node-problem-detector:v0.4.1
image: k8s.gcr.io/node-problem-detector:v0.4.1
command:
- "/bin/sh"
- "-c"

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGE=gcr.io/google_containers/python
IMAGE=staging-k8s.gcr.io/python
VERSION=v1
.PHONY: build push
@ -21,5 +21,5 @@ build:
docker build --pull -t "$(IMAGE):$(VERSION)" .
push:
gcloud docker -- push "$(IMAGE):$(VERSION)"
docker push "$(IMAGE):$(VERSION)"

View File

@ -1,274 +0,0 @@
# Private Docker Registry in Kubernetes
Kubernetes offers an optional private Docker registry addon, which you can turn
on when you bring up a cluster or install later. This gives you a place to
store truly private Docker images for your cluster.
## How it works
The private registry runs as a `Pod` in your cluster. It does not currently
support SSL or authentication, which triggers Docker's "insecure registry"
logic. To work around this, we run a proxy on each node in the cluster,
exposing a port onto the node (via a hostPort), which Docker accepts as
"secure", since it is accessed by `localhost`.
## Turning it on
Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
`ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs
whether the registry is run or not. To set this flag, you can specify
`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
does not include this flag, the following steps should work. Note that some of
this is cloud-provider specific, so you may have to customize it a bit.
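For example, on GCE the flag can be set when bringing the cluster up. A minimal sketch, assuming the standard `kube-up.sh` workflow from a kubernetes checkout:

```console
$ KUBE_ENABLE_CLUSTER_REGISTRY=true ./cluster/kube-up.sh
```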
### Make some storage
The primary job of the registry is to store data. To do that we have to decide
where to store it. For cloud environments that have networked storage, we can
use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
```yaml
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
capacity:
storage: {{ pillar['cluster_registry_disk_size'] }}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: "{{ pillar['cluster_registry_disk_name'] }}"
fsType: "ext4"
{% endif %}
```
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
If, for example, you wanted to use NFS you would just need to change the
`gcePersistentDisk` block to `nfs`. See
[here](https://kubernetes.io/docs/user-guide/volumes.md) for more details on volumes.
Note that in any case, the storage (in this case the GCE PersistentDisk) must be
created independently - this is not something Kubernetes manages for you (yet).
### I don't want or don't have persistent storage
If you are running in a place that doesn't have networked storage, or if you
just want to kick the tires on this without committing to it, you can easily
adapt the `ReplicationController` specification below to use a simple
`emptyDir` volume instead of a `persistentVolumeClaim`.
## Claim the storage
Now that the Kubernetes cluster knows that some storage exists, you can put a
claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}
```
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other
`PersistentVolumes`, in which case those might get bound instead). This claim
gives you the right to use this storage until you release the claim.
## Run the registry
Now we can run a Docker registry:
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry-upstream
version: v0
template:
metadata:
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
limits:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
persistentVolumeClaim:
claimName: kube-registry-pvc
```
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
## Expose the registry in the cluster
Now that we have a registry `Pod` running, we can expose it as a Service:
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry-upstream
ports:
- name: registry
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
## Expose the registry on each node
Now that we have a running `Service`, we need to expose it onto each Kubernetes
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating the following DaemonSet.
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-registry-proxy
namespace: kube-system
labels:
k8s-app: kube-registry-proxy
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
template:
metadata:
labels:
k8s-app: kube-registry-proxy
kubernetes.io/name: "kube-registry-proxy"
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
containers:
- name: kube-registry-proxy
image: gcr.io/google_containers/kube-registry-proxy:0.4
resources:
limits:
cpu: 100m
memory: 50Mi
env:
- name: REGISTRY_HOST
value: kube-registry.kube-system.svc.cluster.local
- name: REGISTRY_PORT
value: "5000"
ports:
- name: registry
containerPort: 80
hostPort: 5000
```
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
When modifying replication-controller, service, and daemon-set definitions, take
care to ensure _unique_ identifiers for the rc-svc couple and the daemon-set.
Failing to do so will register the localhost proxy daemon-sets to the
upstream service. As a result, they will then try to proxy themselves, which
will, for obvious reasons, not work.
This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:
```console
$ curl localhost:5000
404 page not found
```
## Using the registry
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:
```yaml
image: localhost:5000/user/container
```
Before you can use the registry, you have to be able to get images into it,
though. If you are building an image on your Kubernetes `Node`, you can spell
out `localhost:5000` when you build and push. More likely, though, you are
building locally and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
```console
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
| grep Running | head -1 | cut -f1 -d' ')
$ kubectl port-forward --namespace kube-system $POD 5000:5000 &
```
Now you can build and push images on your local computer as
`localhost:5000/yourname/container` and those images will be available inside
your Kubernetes cluster with the same name.
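For instance, with the port-forward above still running (`yourname/container` is a placeholder image name):

```console
# build against the forwarded registry address, then push
$ docker build -t localhost:5000/yourname/container .
$ docker push localhost:5000/yourname/container
```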
## More Extensions
- [Use GCS as storage backend](gcs/README.md)
- [Enable TLS/SSL](tls/README.md)
- [Enable Authentication](auth/README.md)
## Future improvements
* Allow port-forwarding to a Service rather than a pod (#15180)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/README.md?pixel)]()

View File

@ -1,92 +0,0 @@
# Enable Authentication with Htpasswd for Kube-Registry
The Docker registry supports a few authentication providers. A full list of supported providers can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable htpasswd authentication for kube-registry.
### Prepare Htpasswd Secret
Please generate your own htpasswd file. Assume the file you generated is named `htpasswd`.
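For example, using the `htpasswd` utility from `apache2-utils` (an assumption; any tool works as long as it produces bcrypt entries, which is the only hash the registry's htpasswd backend accepts):

```console
# -B forces bcrypt; myuser/mypassword are placeholders
$ htpasswd -Bbc htpasswd myuser mypassword
```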
Create a secret to hold the htpasswd file:
```console
$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd
```
### Run Registry
Please note that this sample RC uses emptyDir as the storage backend for simplicity.
<!-- BEGIN MUNGE: EXAMPLE registry-auth-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: basic_realm
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /auth/htpasswd
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: auth-dir
mountPath: /auth
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: auth-dir
secret:
secretName: registry-auth-secret
```
<!-- END MUNGE: EXAMPLE registry-auth-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).
### To Verify
Set up a proxy or port-forwarding to the kube-registry. Image push/pull should fail without authentication. Then use `docker login` to authenticate with kube-registry and verify it works.
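A sketch of the verification flow, assuming the registry is reachable on `localhost:5000` (e.g. via `kubectl port-forward` as described in the main registry README):

```console
# myuser is a placeholder; log in with credentials from your htpasswd file
$ docker login localhost:5000
$ docker tag busybox localhost:5000/myuser/busybox
$ docker push localhost:5000/myuser/busybox
```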
### Configure Nodes to Authenticate with Kube-Registry
By default, nodes assume no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, more documentation can be found [here](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/auth/README.md?pixel)]()

View File

@ -1,56 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: basic_realm
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /auth/htpasswd
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: auth-dir
mountPath: /auth
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: auth-dir
secret:
secretName: registry-auth-secret

View File

@ -1,81 +0,0 @@
# Kube-Registry with GCS storage backend
Besides the local file system, the Docker registry also supports a number of cloud storage backends. A full list of supported backends can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS as the storage backend for kube-registry.
A few preparation steps are needed (see the example after this list):
1. Create a bucket named kube-registry in GCS.
1. Create a service account for GCS access and create a key file in JSON format. Detailed instructions can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts).
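For the bucket, assuming the `gsutil` CLI is installed and authenticated, creation might look like:

```console
# make the bucket the registry will store layers in
$ gsutil mb gs://kube-registry
```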
### Pack Keyfile into a Secret
Assuming you have downloaded the keyfile as `keyfile.json`, create a secret from it:
```console
$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json
```
### Run Registry
<!-- BEGIN MUNGE: EXAMPLE registry-gcs-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE
value: gcs
- name: REGISTRY_STORAGE_GCS_BUCKET
value: kube-registry
- name: REGISTRY_STORAGE_GCS_KEYFILE
value: /gcs/keyfile
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumeMounts:
- name: gcs-key
mountPath: /gcs
volumes:
- name: gcs-key
secret:
secretName: gcs-key-secret
```
<!-- END MUNGE: EXAMPLE registry-gcs-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/gcs/README.md?pixel)]()

View File

@ -1,52 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE
value: gcs
- name: REGISTRY_STORAGE_GCS_BUCKET
value: kube-registry
- name: REGISTRY_STORAGE_GCS_KEYFILE
value: /gcs/keyfile
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumeMounts:
- name: gcs-key
mountPath: /gcs
volumes:
- name: gcs-key
secret:
secretName: gcs-key-secret

View File

@ -1,26 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM nginx:1.11
RUN apt-get update \
&& apt-get install -y \
curl \
--no-install-recommends \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc
COPY rootfs /
CMD ["/bin/boot"]

View File

@ -1,24 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build push vet test clean
TAG = 0.4
REPO = gcr.io/google_containers/kube-registry-proxy
build:
docker build --pull -t $(REPO):$(TAG) .
push:
gcloud docker -- push $(REPO):$(TAG)

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
# fail if no hostname is provided
REGISTRY_HOST=${REGISTRY_HOST:?no host}
REGISTRY_PORT=${REGISTRY_PORT:-5000}
# we are always listening on port 80
# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25
PORT=80
sed -e "s/%HOST%/$REGISTRY_HOST/g" \
-e "s/%PORT%/$REGISTRY_PORT/g" \
-e "s/%BIND_PORT%/$PORT/g" \
</etc/nginx/conf.d/default.conf.in >/etc/nginx/conf.d/default.conf
# wait for registry to come online
while ! curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do
printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT"
sleep 1
done
printf "starting proxy...\n"
exec nginx -g "daemon off;" "$@"

View File

@ -1,28 +0,0 @@
# Docker registry proxy for api version 2
upstream docker-registry {
server %HOST%:%PORT%;
}
# No client auth or TLS
# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS
server {
listen %BIND_PORT%;
server_name localhost;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location / {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
include docker-registry.conf;
}
}

View File

@ -1,6 +0,0 @@
proxy_pass http://docker-registry;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;

View File

@ -1,26 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}

View File

@ -1,17 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
capacity:
storage: {{ pillar['cluster_registry_disk_size'] }}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: "{{ pillar['cluster_registry_disk_name'] }}"
fsType: "ext4"
{% endif %}

View File

@ -1,14 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}

View File

@ -1,49 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
k8s-app: kube-registry-upstream
version: v0
template:
metadata:
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2.5.1
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
persistentVolumeClaim:
claimName: kube-registry-pvc

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry-upstream
ports:
- name: registry
port: 5000
protocol: TCP

View File

@ -1,116 +0,0 @@
# Enable TLS for Kube-Registry
This document describes how to enable TLS for kube-registry. Before you start, please check that you have all the prerequisites:
- A domain for kube-registry. Assuming it is `myregistrydomain.com`.
- A domain certificate and key. Assuming they are `domain.crt` and `domain.key`.
### Pack domain.crt and domain.key into a Secret
```console
$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key
```
### Run Registry
Please note that this sample RC uses emptyDir as the storage backend for simplicity.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: /certs/domain.crt
- name: REGISTRY_HTTP_TLS_KEY
value: /certs/domain.key
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: cert-dir
mountPath: /certs
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: cert-dir
secret:
secretName: registry-tls-secret
```
<!-- END MUNGE: EXAMPLE registry-tls-rc.yaml -->
### Expose External IP for Kube-Registry
Modify the default kube-registry service to the `LoadBalancer` type and point the DNS record of `myregistrydomain.com` to the service's external IP.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
# kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
type: LoadBalancer
ports:
- name: registry
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-tls-svc.yaml -->
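Once the cloud provider has provisioned the load balancer, the external IP can be looked up with `kubectl`; a sketch (column layout varies by kubectl version):

```console
$ kubectl --namespace=kube-system get service kube-registry
```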
### To Verify
Now you should be able to access your kube-registry from another Docker host.
```console
docker pull busybox
docker tag busybox myregistrydomain.com:5000/busybox
docker push myregistrydomain.com:5000/busybox
docker pull myregistrydomain.com:5000/busybox
```
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/tls/README.md?pixel)]()

View File

@ -1,57 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: /certs/domain.crt
- name: REGISTRY_HTTP_TLS_KEY
value: /certs/domain.key
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: cert-dir
mountPath: /certs
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: cert-dir
secret:
secretName: registry-tls-secret

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
# kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
type: LoadBalancer
ports:
- name: registry
port: 5000
protocol: TCP