Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00

Commit: vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/kube-addons.sh (generated, vendored): 2 changes

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Copyright 2014 The Kubernetes Authors.
 #
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-clusterrole.yaml (generated, vendored): 16 changes

@@ -36,6 +36,7 @@ rules:
      - get
      - list
      - watch
+     - patch
  - apiGroups: [""]
    resources:
      - nodes
@@ -51,17 +52,28 @@ rules:
      - get
      - list
      - watch
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - clusterinformations
      - hostendpoints
    verbs:
      - create
      - get
      - list
      - update
      - patch
      - delete
      - watch
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml (generated, vendored)

@@ -41,18 +41,22 @@ spec:
            value: "none"
+         - name: DATASTORE_TYPE
+           value: "kubernetes"
-         - name: FELIX_TYPHAK8SSERVICENAME
-           value: "calico-typha"
          - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
            value: "ACCEPT"
          - name: FELIX_HEALTHENABLED
            value: "true"
          - name: FELIX_IPV6SUPPORT
            value: "false"
-         - name: FELIX_LOGSEVERITYSYS
-           value: "none"
+         - name: FELIX_LOGSEVERITYSCREEN
+           value: "info"
          - name: FELIX_PROMETHEUSMETRICSENABLED
            value: "true"
          - name: FELIX_HEALTHENABLED
            value: "true"
+         - name: FELIX_REPORTINGINTERVALSECS
+           value: "0"
+         - name: FELIX_TYPHAK8SSERVICENAME
+           value: "calico-typha"
          - name: IP
            value: ""
          - name: NO_DEFAULT_POOLS
@@ -84,6 +88,12 @@ spec:
          - mountPath: /etc/calico
            name: etc-calico
            readOnly: true
+         - mountPath: /var/run/calico
+           name: var-run-calico
+           readOnly: false
+         - mountPath: /var/lib/calico
+           name: var-lib-calico
+           readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
@@ -149,6 +159,12 @@ spec:
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
+     - name: var-run-calico
+       hostPath:
+         path: /var/run/calico
+     - name: var-lib-calico
+       hostPath:
+         path: /var/lib/calico
      tolerations:
      # Make sure calico/node gets scheduled on all nodes.
      - effect: NoSchedule
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/clusterinformations-crd.yaml (generated, vendored, new file): 15 lines

@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterinformations.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: ClusterInformation
+    plural: clusterinformations
+    singular: clusterinformation
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/felixconfigurations-crd.yaml (generated, vendored, new file): 15 lines

@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: felixconfigurations.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: FelixConfiguration
+    plural: felixconfigurations
+    singular: felixconfiguration
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalbgpconfigs-crd.yaml (generated, vendored)

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico Global BGP Configuration
 kind: CustomResourceDefinition
 metadata:
   name: globalbgpconfigs.crd.projectcalico.org
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalfelixconfigs-crd.yaml (generated, vendored)

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico Global Felix Configuration
 kind: CustomResourceDefinition
 metadata:
   name: globalfelixconfigs.crd.projectcalico.org
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalnetworkpolicies-crd.yaml (generated, vendored)

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico Global Network Policies
 kind: CustomResourceDefinition
 metadata:
   name: globalnetworkpolicies.crd.projectcalico.org
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalnetworksets-crd.yaml (generated, vendored, new file): 15 lines

@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: globalnetworksets.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: GlobalNetworkSet
+    plural: globalnetworksets
+    singular: globalnetworkset
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/hostendpoints-crd.yaml (generated, vendored, new file): 15 lines

@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: hostendpoints.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: HostEndpoint
+    plural: hostendpoints
+    singular: hostendpoint
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/ippool-crd.yaml (generated, vendored): 1 change

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico IP Pools
 kind: CustomResourceDefinition
 metadata:
   name: ippools.crd.projectcalico.org
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/networkpolicies-crd.yaml (generated, vendored, new file): 15 lines

@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: networkpolicies.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Namespaced
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: NetworkPolicy
+    plural: networkpolicies
+    singular: networkpolicy
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-deployment.yaml (generated, vendored)

@@ -44,6 +44,8 @@ spec:
            value: "9093"
          - name: TYPHA_DATASTORETYPE
            value: "kubernetes"
+         - name: TYPHA_REPORTINGINTERVALSECS
+           value: "0"
          - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
            value: "1"
          - name: TYPHA_HEALTHENABLED
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml (generated, vendored)

@@ -18,7 +18,7 @@ spec:
    spec:
      priorityClassName: system-cluster-critical
      containers:
-     - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2
+     - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
        name: autoscaler
        command:
        - /cluster-proportional-autoscaler
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-clusterrole.yaml (generated, vendored, new file): 14 lines

@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: typha-cpva
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list"]
+  - apiGroups: ["apps", "extensions"]
+    resources: ["deployments"]
+    verbs: ["patch"]
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-clusterrolebinding.yaml (generated, vendored, new file): 15 lines

@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: typha-cpva
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: typha-cpva
+subjects:
+  - kind: ServiceAccount
+    name: typha-cpva
+    namespace: kube-system
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-serviceaccount.yaml (generated, vendored, new file): 8 lines

@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: typha-cpva
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml (generated, vendored)

@@ -9,7 +9,6 @@ metadata:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: glbc
@@ -18,6 +17,8 @@ spec:
      labels:
        k8s-app: glbc
        name: glbc
+     annotations:
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: default-http-backend
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/README.md (generated, vendored): 13 changes

@@ -5,4 +5,17 @@ Heapster collects signals from kubelets and the api server, processes them, and
 
 More details can be found in [Monitoring user guide](http://kubernetes.io/docs/user-guide/monitoring/).
 
+## Troubleshooting
+
+Heapster supports up to 30 pods per cluster node. In clusters with more running pods, Heapster may be throttled or fail with an OOM error. Starting with Kubernetes 1.9.2, Heapster resource requirements may be overwritten manually. [Learn more about Addon Resizer configuration](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer#addon-resizer-configuration)
+
+### Important notices
+
+Decreasing resource requirements for cluster addons may cause system instability. The effects may include (but are not limited to):
+- Metrics not being exported
+- Horizontal Pod Autoscaler not working
+- `kubectl top` not working
+
+Overwritten configuration persists through cluster updates, and may therefore cause all of the above effects after a cluster update.
 
 []()
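Per the Addon Resizer link above, the override is applied by editing the nanny container's flags on the Heapster deployment. A minimal sketch (deployment name taken from this commit; the flag values are illustrative only):

    $ kubectl -n kube-system edit deployment/heapster-v1.5.3
    # then, in the addon-resizer (nanny) container args, adjust for example:
    #   --memory=200Mi        # base memory request/limit
    #   --extra-memory=4Mi    # additional memory per cluster node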
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/google/heapster-controller.yaml (generated, vendored)

@@ -36,30 +36,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: heapster-v1.5.0
+ name: heapster-v1.5.3
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
-   version: v1.5.0
+   version: v1.5.3
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
-     version: v1.5.0
+     version: v1.5.3
  template:
    metadata:
      labels:
        k8s-app: heapster
-       version: v1.5.0
+       version: v1.5.3
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
-     - image: k8s.gcr.io/heapster-amd64:v1.5.0
+     - image: k8s.gcr.io/heapster-amd64:v1.5.3
        name: heapster
        livenessProbe:
          httpGet:
@@ -72,7 +73,7 @@ spec:
        - /heapster
        - --source=kubernetes.summary_api:''
        - --sink=gcm
-     - image: k8s.gcr.io/heapster-amd64:v1.5.0
+     - image: k8s.gcr.io/heapster-amd64:v1.5.3
        name: eventer
        command:
        - /eventer
@@ -107,7 +108,7 @@ spec:
        - --memory={{ base_metrics_memory }}
        - --extra-memory={{metrics_memory_per_node}}Mi
        - --threshold=5
-       - --deployment=heapster-v1.5.0
+       - --deployment=heapster-v1.5.3
        - --container=heapster
        - --poll-period=300000
        - --estimator=exponential
@@ -140,7 +141,7 @@ spec:
        - --memory={{base_eventer_memory}}
        - --extra-memory={{eventer_memory_per_node}}Ki
        - --threshold=5
-       - --deployment=heapster-v1.5.0
+       - --deployment=heapster-v1.5.3
        - --container=eventer
        - --poll-period=300000
        - --estimator=exponential
|
@ -36,30 +36,31 @@ data:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.5.0
|
||||
name: heapster-v1.5.3
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.5.0
|
||||
version: v1.5.3
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.5.0
|
||||
version: v1.5.3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.5.0
|
||||
version: v1.5.3
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.0
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.3
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@ -73,7 +74,7 @@ spec:
|
||||
- --source=kubernetes.summary_api:''
|
||||
- --sink=influxdb:http://monitoring-influxdb:8086
|
||||
- --sink=gcm:?metrics=autoscaling
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.0
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.3
|
||||
name: eventer
|
||||
command:
|
||||
- /eventer
|
||||
@ -108,7 +109,7 @@ spec:
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{ metrics_memory_per_node }}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.5.0
|
||||
- --deployment=heapster-v1.5.3
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
@ -141,7 +142,7 @@ spec:
|
||||
- --memory={{ base_eventer_memory }}
|
||||
- --extra-memory={{ eventer_memory_per_node }}Ki
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.5.0
|
||||
- --deployment=heapster-v1.5.3
|
||||
- --container=eventer
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
|
@ -36,30 +36,31 @@ data:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.5.0
|
||||
name: heapster-v1.5.3
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.5.0
|
||||
version: v1.5.3
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.5.0
|
||||
version: v1.5.3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.5.0
|
||||
version: v1.5.3
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.0
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.3
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@ -72,7 +73,7 @@ spec:
|
||||
- /heapster
|
||||
- --source=kubernetes.summary_api:''
|
||||
- --sink=influxdb:http://monitoring-influxdb:8086
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.0
|
||||
- image: k8s.gcr.io/heapster-amd64:v1.5.3
|
||||
name: eventer
|
||||
command:
|
||||
- /eventer
|
||||
@ -107,7 +108,7 @@ spec:
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{ metrics_memory_per_node }}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.5.0
|
||||
- --deployment=heapster-v1.5.3
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
@ -140,7 +141,7 @@ spec:
|
||||
- --memory={{ base_eventer_memory }}
|
||||
- --extra-memory={{ eventer_memory_per_node }}Ki
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.5.0
|
||||
- --deployment=heapster-v1.5.3
|
||||
- --container=eventer
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
|
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml (generated, vendored)

@@ -21,6 +21,7 @@ spec:
        version: v4
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml (generated, vendored)

@@ -23,30 +23,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: heapster-v1.5.0
+ name: heapster-v1.5.3
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
-   version: v1.5.0
+   version: v1.5.3
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
-     version: v1.5.0
+     version: v1.5.3
  template:
    metadata:
      labels:
        k8s-app: heapster
-       version: v1.5.0
+       version: v1.5.3
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
-     - image: k8s.gcr.io/heapster-amd64:v1.5.0
+     - image: k8s.gcr.io/heapster-amd64:v1.5.3
        name: heapster
        livenessProbe:
          httpGet:
@@ -58,7 +59,7 @@ spec:
        command:
        - /heapster
        - --source=kubernetes.summary_api:''
-       - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110
+       - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }}
      # BEGIN_PROMETHEUS_TO_SD
      - name: prom-to-sd
        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
@@ -108,7 +109,7 @@ spec:
        - --memory={{ base_metrics_memory }}
        - --extra-memory={{metrics_memory_per_node}}Mi
        - --threshold=5
-       - --deployment=heapster-v1.5.0
+       - --deployment=heapster-v1.5.3
        - --container=heapster
        - --poll-period=300000
        - --estimator=exponential
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml (generated, vendored)

@@ -23,30 +23,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: heapster-v1.5.0
+ name: heapster-v1.5.3
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
-   version: v1.5.0
+   version: v1.5.3
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
-     version: v1.5.0
+     version: v1.5.3
  template:
    metadata:
      labels:
        k8s-app: heapster
-       version: v1.5.0
+       version: v1.5.3
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
-     - image: k8s.gcr.io/heapster-amd64:v1.5.0
+     - image: k8s.gcr.io/heapster-amd64:v1.5.3
        name: heapster
        livenessProbe:
          httpGet:
@@ -87,7 +88,7 @@ spec:
        - --memory={{ base_metrics_memory }}
        - --extra-memory={{ metrics_memory_per_node }}Mi
        - --threshold=5
-       - --deployment=heapster-v1.5.0
+       - --deployment=heapster-v1.5.3
        - --container=heapster
        - --poll-period=300000
        - --estimator=exponential
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-controller.yaml (generated, vendored): 2 changes

@@ -26,6 +26,7 @@ spec:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
@@ -42,6 +43,7 @@ spec:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
+         - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
vendor/k8s.io/kubernetes/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml (generated, vendored): 14 changes

@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: nvidia-gpu-device-plugin
@@ -7,6 +7,9 @@ metadata:
    k8s-app: nvidia-gpu-device-plugin
    addonmanager.kubernetes.io/mode: Reconcile
spec:
+ selector:
+   matchLabels:
+     k8s-app: nvidia-gpu-device-plugin
  template:
    metadata:
      labels:
@@ -23,11 +26,10 @@ spec:
          - key: cloud.google.com/gke-accelerator
            operator: Exists
      tolerations:
      - key: "nvidia.com/gpu"
      - operator: "Exists"
        effect: "NoExecute"
      - operator: "Exists"
        effect: "NoSchedule"
        operator: "Exists"
      hostNetwork: true
      hostPID: true
      volumes:
      - name: device-plugin
        hostPath:
@@ -53,3 +55,5 @@ spec:
          mountPath: /device-plugin
        - name: dev
          mountPath: /dev
+     updateStrategy:
+       type: RollingUpdate
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml (generated, vendored)

@@ -77,6 +77,7 @@ spec:
        k8s-app: kube-dns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
@@ -91,7 +92,7 @@ spec:
        - --namespace=kube-system
        - --configmap=kube-dns-autoscaler
        # Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base
-       - --target=Deployment/kube-dns
+       - --target={{.Target}}
        # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
        # If using small nodes, "nodesPerReplica" should dominate.
        - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
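For reference, the linear mode of cluster-proportional-autoscaler used above sizes its target as replicas = max(ceil(cores / coresPerReplica), ceil(nodes / nodesPerReplica)), and preventSinglePointFailure keeps the result at 2 or more. A worked example with the default-params from this manifest (the cluster size is illustrative):

    # 80 nodes x 8 cores = 640 cores
    #   ceil(640 / 256) = 3    replicas implied by cores
    #   ceil(80 / 16)   = 5    replicas implied by nodes
    # replicas = max(3, 5) = 5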
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/Makefile (generated, vendored)

@@ -29,6 +29,6 @@ all: transform

%.sed: %.base
	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

-transform: kube-dns.yaml.in kube-dns.yaml.sed coredns.yaml.in coredns.yaml.sed
+transform: coredns.yaml.in coredns.yaml.sed

.PHONY: transform
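Expanded by hand for one target, the %.sed pattern rule above is equivalent to:

    $ sed -f transforms2sed.sed coredns.yaml.base \
        | sed s/__SOURCE_FILENAME__/coredns.yaml.base/g > coredns.yaml.sed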
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.base (generated, vendored)

@@ -58,14 +58,15 @@ data:
    .:53 {
        errors
        health
-       kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
+       kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa ip6.arpa {
            pods insecure
-           upstream /etc/resolv.conf
+           upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
+       reload
    }
---
apiVersion: extensions/v1beta1
@@ -74,23 +75,28 @@ metadata:
  name: coredns
  namespace: kube-system
  labels:
-   k8s-app: coredns
+   k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
- replicas: 2
+ # replicas: not specified here:
+ # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+ # 2. Default is 1.
+ # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
-     k8s-app: coredns
+     k8s-app: kube-dns
  template:
    metadata:
      labels:
-       k8s-app: coredns
+       k8s-app: kube-dns
+     annotations:
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
@@ -98,21 +104,9 @@ spec:
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
-     affinity:
-       podAntiAffinity:
-         preferredDuringSchedulingIgnoredDuringExecution:
-         - weight: 100
-           podAffinityTerm:
-             labelSelector:
-               matchExpressions:
-               - key: k8s-app
-                 operator: In
-                 values:
-                 - coredns
-             topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
-       image: coredns/coredns:1.0.4
+       image: k8s.gcr.io/coredns:1.1.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
@@ -124,6 +118,7 @@ spec:
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
+         readOnly: true
        ports:
        - containerPort: 53
          name: dns
@@ -131,6 +126,9 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
+       - containerPort: 9153
+         name: metrics
+         protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
@@ -140,6 +138,14 @@ spec:
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
+       securityContext:
+         allowPrivilegeEscalation: false
+         capabilities:
+           add:
+           - NET_BIND_SERVICE
+           drop:
+           - all
+         readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
@@ -152,16 +158,19 @@ spec:
apiVersion: v1
kind: Service
metadata:
- name: coredns
+ name: kube-dns
  namespace: kube-system
+ annotations:
+   prometheus.io/port: "9153"
+   prometheus.io/scrape: "true"
  labels:
-   k8s-app: coredns
+   k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
-   k8s-app: coredns
+   k8s-app: kube-dns
  clusterIP: __PILLAR__DNS__SERVER__
  ports:
  - name: dns
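With __PILLAR__DNS__DOMAIN__ substituted (cluster.local is the usual domain, used here only as an illustration), the updated Corefile served by this ConfigMap would read:

    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        reload
    }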
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.in (generated, vendored)

@@ -58,14 +58,15 @@ data:
    .:53 {
        errors
        health
-       kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
+       kubernetes {{ pillar['dns_domain'] }} in-addr.arpa ip6.arpa {
            pods insecure
-           upstream /etc/resolv.conf
+           upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
+       reload
    }
---
apiVersion: extensions/v1beta1
@@ -74,23 +75,28 @@ metadata:
  name: coredns
  namespace: kube-system
  labels:
-   k8s-app: coredns
+   k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
- replicas: 2
+ # replicas: not specified here:
+ # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+ # 2. Default is 1.
+ # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
-     k8s-app: coredns
+     k8s-app: kube-dns
  template:
    metadata:
      labels:
-       k8s-app: coredns
+       k8s-app: kube-dns
+     annotations:
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
@@ -98,21 +104,9 @@ spec:
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
-     affinity:
-       podAntiAffinity:
-         preferredDuringSchedulingIgnoredDuringExecution:
-         - weight: 100
-           podAffinityTerm:
-             labelSelector:
-               matchExpressions:
-               - key: k8s-app
-                 operator: In
-                 values:
-                 - coredns
-             topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
-       image: coredns/coredns:1.0.4
+       image: k8s.gcr.io/coredns:1.1.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
@@ -124,6 +118,7 @@ spec:
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
+         readOnly: true
        ports:
        - containerPort: 53
          name: dns
@@ -131,6 +126,9 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
+       - containerPort: 9153
+         name: metrics
+         protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
@@ -140,6 +138,14 @@ spec:
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
+       securityContext:
+         allowPrivilegeEscalation: false
+         capabilities:
+           add:
+           - NET_BIND_SERVICE
+           drop:
+           - all
+         readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
@@ -152,16 +158,19 @@ spec:
apiVersion: v1
kind: Service
metadata:
- name: coredns
+ name: kube-dns
  namespace: kube-system
+ annotations:
+   prometheus.io/port: "9153"
+   prometheus.io/scrape: "true"
  labels:
-   k8s-app: coredns
+   k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
-   k8s-app: coredns
+   k8s-app: kube-dns
  clusterIP: {{ pillar['dns_server'] }}
  ports:
  - name: dns
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.sed (generated, vendored)

@@ -58,14 +58,15 @@ data:
    .:53 {
        errors
        health
-       kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
+       kubernetes $DNS_DOMAIN in-addr.arpa ip6.arpa {
            pods insecure
-           upstream /etc/resolv.conf
+           upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
+       reload
    }
---
apiVersion: extensions/v1beta1
@@ -74,23 +75,28 @@ metadata:
  name: coredns
  namespace: kube-system
  labels:
-   k8s-app: coredns
+   k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
- replicas: 2
+ # replicas: not specified here:
+ # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+ # 2. Default is 1.
+ # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
-     k8s-app: coredns
+     k8s-app: kube-dns
  template:
    metadata:
      labels:
-       k8s-app: coredns
+       k8s-app: kube-dns
+     annotations:
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
@@ -98,21 +104,9 @@ spec:
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
-     affinity:
-       podAntiAffinity:
-         preferredDuringSchedulingIgnoredDuringExecution:
-         - weight: 100
-           podAffinityTerm:
-             labelSelector:
-               matchExpressions:
-               - key: k8s-app
-                 operator: In
-                 values:
-                 - coredns
-             topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
-       image: coredns/coredns:1.0.4
+       image: k8s.gcr.io/coredns:1.1.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
@@ -124,6 +118,7 @@ spec:
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
+         readOnly: true
        ports:
        - containerPort: 53
          name: dns
@@ -131,6 +126,9 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
+       - containerPort: 9153
+         name: metrics
+         protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
@@ -140,6 +138,14 @@ spec:
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
+       securityContext:
+         allowPrivilegeEscalation: false
+         capabilities:
+           add:
+           - NET_BIND_SERVICE
+           drop:
+           - all
+         readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
@@ -152,16 +158,19 @@ spec:
apiVersion: v1
kind: Service
metadata:
- name: coredns
+ name: kube-dns
  namespace: kube-system
+ annotations:
+   prometheus.io/port: "9153"
+   prometheus.io/scrape: "true"
  labels:
-   k8s-app: coredns
+   k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
-   k8s-app: coredns
+   k8s-app: kube-dns
  clusterIP: $DNS_SERVER_IP
  ports:
  - name: dns
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/Makefile (generated, vendored, new file): 34 lines

@@ -0,0 +1,34 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Makefile for the kubedns underscore templates to Salt/Pillar and other formats.
+
+# If you update the *.base templates, please run this Makefile before pushing.
+#
+# Usage:
+#   make
+
+all: transform
+
+# .base -> .in pattern rule
+%.in: %.base
+	sed -f transforms2salt.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@
+
+# .base -> .sed pattern rule
+%.sed: %.base
+	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@
+
+transform: kube-dns.yaml.in kube-dns.yaml.sed
+
+.PHONY: transform
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.base (generated, vendored)

@@ -83,6 +83,7 @@ spec:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
@@ -95,7 +96,7 @@ spec:
            optional: true
      containers:
      - name: kubedns
-       image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
@@ -146,7 +147,7 @@ spec:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
-       image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
@@ -185,7 +186,7 @@ spec:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
-       image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
        livenessProbe:
          httpGet:
            path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.in (generated, vendored)

@@ -83,6 +83,7 @@ spec:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
@@ -95,7 +96,7 @@ spec:
            optional: true
      containers:
      - name: kubedns
-       image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
@@ -146,7 +147,7 @@ spec:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
-       image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
@@ -185,7 +186,7 @@ spec:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
-       image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
        livenessProbe:
          httpGet:
            path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.sed (generated, vendored)

@@ -83,6 +83,7 @@ spec:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
@@ -95,7 +96,7 @@ spec:
            optional: true
      containers:
      - name: kubedns
-       image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
@@ -146,7 +147,7 @@ spec:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
-       image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
@@ -185,7 +186,7 @@ spec:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
-       image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
+       image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
        livenessProbe:
          httpGet:
            path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/transforms2salt.sed (generated, vendored, new file): 4 lines

@@ -0,0 +1,4 @@
+s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
+s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
+s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
+s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/transforms2sed.sed (generated, vendored, new file): 4 lines

@@ -0,0 +1,4 @@
+s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
+s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
+s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
+s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
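A quick way to see what these rules produce is to pipe a template line through them (the input line is illustrative):

    $ echo "clusterIP: __PILLAR__DNS__SERVER__" | sed -f transforms2sed.sed
    clusterIP: $DNS_SERVER_IP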
vendor/k8s.io/kubernetes/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml (generated, vendored, deleted)

@@ -1,27 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: etcd-empty-dir-cleanup
-  namespace: kube-system
-  labels:
-    k8s-app: etcd-empty-dir-cleanup
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: etcd-empty-dir-cleanup
-  namespace: kube-system
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ''
-  labels:
-    k8s-app: etcd-empty-dir-cleanup
-spec:
-  priorityClassName: system-node-critical
-  serviceAccountName: etcd-empty-dir-cleanup
-  hostNetwork: true
-  dnsPolicy: Default
-  containers:
-  - name: etcd-empty-dir-cleanup
-    image: k8s.gcr.io/etcd-empty-dir-cleanup:3.1.10.0
@@ -1,16 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: gce:podsecuritypolicy:etcd-empty-dir-cleanup
-  namespace: kube-system
-  labels:
-    addonmanager.kubernetes.io/mode: Reconcile
-    kubernetes.io/cluster-service: "true"
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: gce:podsecuritypolicy:etcd-empty-dir-cleanup
-subjects:
-- kind: ServiceAccount
-  name: etcd-empty-dir-cleanup
-  namespace: kube-system
@@ -1,17 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: gce:podsecuritypolicy:etcd-empty-dir-cleanup
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
-  - extensions
-  resourceNames:
-  - gce.etcd-empty-dir-cleanup
-  resources:
-  - podsecuritypolicies
-  verbs:
-  - use
@@ -1,31 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: PodSecurityPolicy
-metadata:
-  name: gce.etcd-empty-dir-cleanup
-  annotations:
-    kubernetes.io/description: 'Policy used by the etcd-empty-dir-cleanup addon.'
-    # TODO: etcd-empty-dir-cleanup should run with the default seccomp profile
-    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
-    # 'runtime/default' is already the default, but must be filled in on the
-    # pod to pass admission.
-    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
-    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
-  labels:
-    kubernetes.io/cluster-service: 'true'
-    addonmanager.kubernetes.io/mode: Reconcile
-spec:
-  privileged: false
-  volumes:
-  - 'secret'
-  hostNetwork: true
-  hostIPC: false
-  hostPID: false
-  runAsUser:
-    rule: 'RunAsAny'
-  seLinux:
-    rule: 'RunAsAny'
-  supplementalGroups:
-    rule: 'RunAsAny'
-  fsGroup:
-    rule: 'RunAsAny'
-  readOnlyRootFilesystem: false
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/OWNERS (generated, vendored): 2 changes

@@ -1,8 +1,6 @@
 approvers:
 - coffeepac
-- crassirostris
 - piosz
 reviewers:
 - coffeepac
-- crassirostris
 - piosz
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml (generated, vendored): 29 changes

@@ -117,8 +117,19 @@ data:
      pos_file /var/log/es-containers.log.pos
      time_format %Y-%m-%dT%H:%M:%S.%NZ
      tag raw.kubernetes.*
-     format json
      read_from_head true
+     <parse>
+       @type multi_format
+       <pattern>
+         format json
+         time_key time
+         time_format %Y-%m-%dT%H:%M:%S.%NZ
+       </pattern>
+       <pattern>
+         format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+         time_format %Y-%m-%dT%H:%M:%S.%N%:z
+       </pattern>
+     </parse>
    </source>

    # Detect exceptions in the log output and forward them as one log entry.
@@ -356,6 +367,22 @@ data:
      read_from_head true
      tag node-problem-detector
    </source>

+   <source>
+     @id kernel
+     @type systemd
+     filters [{ "_TRANSPORT": "kernel" }]
+     <storage>
+       @type local
+       persistent true
+     </storage>
+     <entry>
+       fields_strip_underscores true
+       fields_lowercase true
+     </entry>
+     read_from_head true
+     tag kernel
+   </source>
+
    forward.input.conf: |-
      # Takes the messages sent over TCP
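The two <pattern> entries added above correspond to the two container log formats found on nodes; sample lines for each (taken from the comments in the fluentd-gcp ConfigMap later in this commit):

    # JSON (Docker json-file driver), matched by the first pattern:
    {"log":"[info] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    # CRI format, matched by the regexp pattern:
    2016-02-17T00:04:05.931087621Z stdout F [info] Some log text here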
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml (generated, vendored): 8 changes

@@ -71,6 +71,7 @@ spec:
      # Note that this does not guarantee admission on the nodes (#40573).
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-node-critical
      serviceAccountName: fluentd-es
@@ -92,9 +93,6 @@ spec:
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
-       - name: libsystemddir
-         mountPath: /host/lib
-         readOnly: true
        - name: config-volume
          mountPath: /etc/fluent/config.d
      nodeSelector:
@@ -107,10 +105,6 @@ spec:
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
-     # It is needed to copy systemd library to decompress journals
-     - name: libsystemddir
-       hostPath:
-         path: /usr/lib64
      - name: config-volume
        configMap:
          name: fluentd-es-config-v0.1.4
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-image/run.sh (generated, vendored)

@@ -20,4 +20,4 @@
 # For systems without journald
 mkdir -p /var/log/journal
 
-/usr/local/bin/fluentd $@
+exec /usr/local/bin/fluentd $@
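The exec swap is the whole fix here: without it the wrapper shell stays as the container's main process and fluentd runs as a child, so a SIGTERM sent by the kubelet at pod shutdown stops the shell but may never reach fluentd; with exec, fluentd replaces the shell and receives signals directly. A minimal sketch of the pattern:

    #!/bin/sh
    # ...setup work runs in the shell...
    exec /usr/local/bin/fluentd "$@"   # fluentd takes over this PID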
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml (generated, vendored): 2 changes

@@ -16,6 +16,8 @@ spec:
    metadata:
      labels:
        k8s-app: kibana-logging
+     annotations:
+       seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: kibana-logging
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/OWNERS (generated, vendored): 4 changes

@@ -1,6 +1,6 @@
 approvers:
-- crassirostris
 - piosz
+- x13n
 reviewers:
-- crassirostris
 - piosz
+- x13n
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/event-exporter.yaml (generated, vendored): 10 changes

@@ -29,11 +29,11 @@ subjects:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
- name: event-exporter-v0.1.8
+ name: event-exporter-v0.2.1
  namespace: kube-system
  labels:
    k8s-app: event-exporter
-   version: v0.1.8
+   version: v0.2.1
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
@@ -42,15 +42,15 @@ spec:
    metadata:
      labels:
        k8s-app: event-exporter
-       version: v0.1.8
+       version: v0.2.1
    spec:
      serviceAccountName: event-exporter-sa
      containers:
      - name: event-exporter
-       image: k8s.gcr.io/event-exporter:v0.1.8
+       image: k8s.gcr.io/event-exporter:v0.2.1
        command:
        - /event-exporter
-       - -sink-opts="-location={{ event_exporter_zone }}"
+       - -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }}
      # BEGIN_PROMETHEUS_TO_SD
      - name: prometheus-to-sd-exporter
        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
437
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml
generated
vendored
Normal file
437
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml
generated
vendored
Normal file
@ -0,0 +1,437 @@
|
||||
# This ConfigMap is used to ingest logs against old resources like
|
||||
# "gke_container" and "gce_instance" when $LOGGING_STACKDRIVER_RESOURCE_TYPES is
|
||||
# set to "old".
|
||||
# When $LOGGING_STACKDRIVER_RESOURCE_TYPES is set to "new", the ConfigMap in
|
||||
# fluentd-gcp-configmap.yaml will be used for ingesting logs against new
|
||||
# resources like "k8s_container" and "k8s_node".
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
data:
|
||||
containers.input.conf: |-
|
||||
# This configuration file for Fluentd is used
|
||||
# to watch changes to Docker log files that live in the
|
||||
# directory /var/lib/docker/containers/ and are symbolically
|
||||
# linked to from the /var/log/containers directory using names that capture the
|
||||
# pod name and container name. These logs are then submitted to
|
||||
# Google Cloud Logging which assumes the installation of the cloud-logging plug-in.
|
||||
#
|
||||
# Example
|
||||
# =======
|
||||
# A line in the Docker log file might look like this JSON:
|
||||
#
|
||||
# {"log":"2014/09/25 21:15:03 Got request with path wombat\\n",
|
||||
# "stream":"stderr",
|
||||
# "time":"2014-09-25T21:15:03.499185026Z"}
|
||||
#
|
||||
# The original tag is derived from the log file's location.
|
||||
# For example a Docker container's logs might be in the directory:
|
||||
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
|
||||
# and in the file:
|
||||
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
|
||||
# where 997599971ee6... is the Docker ID of the running container.
|
||||
# The Kubernetes kubelet makes a symbolic link to this file on the host
|
||||
# machine in the /var/log/containers directory which includes the pod name,
|
||||
# the namespace name and the Kubernetes container name:
|
||||
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
|
||||
# ->
|
||||
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
|
||||
# The /var/log directory on the host is mapped to the /var/log directory in the container
|
||||
# running this instance of Fluentd and we end up collecting the file:
|
||||
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
|
||||
# This results in the tag:
|
||||
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
|
||||
# where 'synthetic-logger-0.25lps-pod' is the pod name, 'default' is the
|
||||
# namespace name, 'synth-lgr' is the container name and '997599971ee6..' is
|
||||
# the container ID.
|
||||
# The record reformer is used is discard the var.log.containers prefix and
|
||||
# the Docker container ID suffix and "kubernetes." is pre-pended giving the tag:
|
||||
# kubernetes.synthetic-logger-0.25lps-pod_default_synth-lgr
|
||||
# Tag is then parsed by google_cloud plugin and translated to the metadata,
|
||||
# visible in the log viewer
|
||||
|
||||
# Json Log Example:
|
||||
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
|
||||
# CRI Log Example:
|
||||
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
|
||||
<source>
|
||||
@type tail
|
||||
path /var/log/containers/*.log
|
||||
pos_file /var/log/gcp-containers.log.pos
|
||||
# Tags at this point are in the format of:
|
||||
# reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
|
||||
tag reform.*
|
||||
read_from_head true
|
||||
format multi_format
|
||||
<pattern>
|
||||
format json
|
||||
time_key time
|
||||
time_format %Y-%m-%dT%H:%M:%S.%NZ
|
||||
</pattern>
|
||||
<pattern>
|
||||
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
|
||||
time_format %Y-%m-%dT%H:%M:%S.%N%:z
|
||||
</pattern>
|
||||
</source>
|
||||
|
||||
<filter reform.**>
|
||||
@type parser
|
||||
format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
|
||||
reserve_data true
|
||||
suppress_parse_error_log true
|
||||
emit_invalid_record_to_error false
|
||||
key_name log
|
||||
</filter>
|
||||
|
||||
<match reform.**>
|
||||
@type record_reformer
|
||||
enable_ruby true
|
||||
# Tags at this point are in the format of:
|
||||
# 'raw.kubernetes.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>'.
|
||||
tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
|
||||
</match>
|
||||
|
||||
# Detect exceptions in the log output and forward them as one log entry.
|
||||
<match raw.kubernetes.**>
|
||||
@type detect_exceptions
|
||||
|
||||
remove_tag_prefix raw
|
||||
message log
|
||||
stream stream
|
||||
multiline_flush_interval 5
|
||||
max_bytes 500000
|
||||
max_lines 1000
|
||||
</match>
|
||||
system.input.conf: |-
|
||||
# Example:
|
||||
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
|
||||
<source>
|
||||
@type tail
|
||||
format syslog
|
||||
path /var/log/startupscript.log
|
||||
pos_file /var/log/gcp-startupscript.log.pos
|
||||
tag startupscript
|
||||
</source>
|
||||
|
||||
# Examples:
|
||||
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
|
||||
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
|
||||
# TODO(random-liu): Remove this after cri container runtime rolls out.
|
||||
<source>
|
||||
@type tail
|
||||
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
|
||||
path /var/log/docker.log
|
||||
pos_file /var/log/gcp-docker.log.pos
|
||||
tag docker
|
||||
</source>
|
||||
|
||||
# Example:
|
||||
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
|
||||
<source>
|
||||
@type tail
|
||||
# Not parsing this, because it doesn't have anything particularly useful to
|
||||
# parse out of it (like severities).
|
||||
format none
|
||||
path /var/log/etcd.log
|
||||
pos_file /var/log/gcp-etcd.log.pos
|
||||
tag etcd
|
||||
</source>
|
||||
|
||||
    # Multi-line parsing is required for all the kube logs because very large log
    # statements, such as those that include entire object bodies, get split into
    # multiple lines by glog.

    # Example:
    # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kubelet.log
      pos_file /var/log/gcp-kubelet.log.pos
      tag kubelet
    </source>

    # Example:
    # I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-proxy.log
      pos_file /var/log/gcp-kube-proxy.log.pos
      tag kube-proxy
    </source>

    # Example:
    # I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-apiserver.log
      pos_file /var/log/gcp-kube-apiserver.log.pos
      tag kube-apiserver
    </source>

    # Example:
    # I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-controller-manager.log
      pos_file /var/log/gcp-kube-controller-manager.log.pos
      tag kube-controller-manager
    </source>

    # Example:
    # W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-scheduler.log
      pos_file /var/log/gcp-kube-scheduler.log.pos
      tag kube-scheduler
    </source>

    # Example:
    # I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/rescheduler.log
      pos_file /var/log/gcp-rescheduler.log.pos
      tag rescheduler
    </source>

    # Example:
    # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/glbc.log
      pos_file /var/log/gcp-glbc.log.pos
      tag glbc
    </source>

    # Example:
    # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/cluster-autoscaler.log
      pos_file /var/log/gcp-cluster-autoscaler.log.pos
      tag cluster-autoscaler
    </source>

    # Logs from systemd-journal for interesting services.
    # TODO(random-liu): Keep this for compatibility, remove this after
    # cri container runtime rolls out.
    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
      pos_file /var/log/gcp-journald-docker.pos
      read_from_head true
      tag docker
    </source>

    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
      pos_file /var/log/gcp-journald-container-runtime.pos
      read_from_head true
      tag container-runtime
    </source>

    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      pos_file /var/log/gcp-journald-kubelet.pos
      read_from_head true
      tag kubelet
    </source>

    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      pos_file /var/log/gcp-journald-node-problem-detector.pos
      read_from_head true
      tag node-problem-detector
    </source>

    # BEGIN_NODE_JOURNAL
    # Whether to include node-journal or not is determined when starting the
    # cluster. It is not changed when the cluster is already running.
    <source>
      @type systemd
      pos_file /var/log/gcp-journald.pos
      read_from_head true
      tag node-journal
    </source>

    <filter node-journal>
      @type grep
      <exclude>
        key _SYSTEMD_UNIT
        pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
      </exclude>
    </filter>
    # END_NODE_JOURNAL
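The `format1` expression repeated above is the glog/klog header parser. A rough Go translation — illustrative only, not part of these manifests, and note that Go regexps spell named groups `(?P<name>...)` rather than fluentd's `(?<name>...)` — shows which fields it pulls out of a sample line:

package main

import (
	"fmt"
	"regexp"
)

// glogLine is a sketch of the fluentd format1 regex above in Go syntax.
var glogLine = regexp.MustCompile(
	`^(?P<severity>\w)(?P<time>\d{4} [^\s]*)\s+(?P<pid>\d+)\s+(?P<source>[^ \]]+)\] (?P<message>.*)`)

func main() {
	line := `I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200`
	m := glogLine.FindStringSubmatch(line)
	if m == nil {
		return
	}
	for i, name := range glogLine.SubexpNames() {
		if i > 0 && name != "" {
			fmt.Printf("%s=%q\n", name, m[i]) // severity="I", time="0204 07:32:30.020537", pid="3368", ...
		}
	}
}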
  monitoring.conf: |-
    # This source is used to acquire the approximate process start timestamp,
    # whose purpose is explained before the corresponding output plugin.
    <source>
      @type exec
      command /bin/sh -c 'date +%s'
      tag process_start
      time_format %Y-%m-%d %H:%M:%S
      keys process_start_timestamp
    </source>

    # This filter is used to convert the process start timestamp to an integer
    # value for correct ingestion in the prometheus output plugin.
    <filter process_start>
      @type record_transformer
      enable_ruby true
      auto_typecast true
      <record>
        process_start_timestamp ${record["process_start_timestamp"].to_i}
      </record>
    </filter>
  output.conf: |-
    # This match is placed before the all-matching output to provide metric
    # exporter with a process start timestamp for correct exporting of
    # cumulative metrics to Stackdriver.
    <match process_start>
      @type prometheus

      <metric>
        type gauge
        name process_start_time_seconds
        desc Timestamp of the process start in seconds
        key process_start_timestamp
      </metric>
    </match>

    # This filter makes it possible to count the number of log entries read by
    # fluentd before they are processed by the output plugin. This in turn
    # makes it possible to monitor the number of log entries that were read but
    # never sent, e.g. because the liveness probe removed the buffer.
    <filter **>
      @type prometheus
      <metric>
        type counter
        name logging_entry_count
        desc Total number of log entries generated by either application containers or system components
      </metric>
    </filter>

    # TODO(instrumentation): Reconsider this workaround later.
    # Trim the entries which exceed slightly less than 100KB, to avoid
    # dropping them. It is a necessity, because Stackdriver only supports
    # entries that are up to 100KB in size.
    <filter kubernetes.**>
      @type record_transformer
      enable_ruby true
      <record>
        log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
      </record>
    </filter>

    # Do not collect fluentd's own logs to avoid infinite loops.
    <match fluent.**>
      @type null
    </match>

    # We use 2 output stanzas - one to handle the container logs and one to handle
    # the node daemon logs, the latter of which explicitly sends its logs to the
    # compute.googleapis.com service rather than container.googleapis.com to keep
    # them separate since most users don't care about the node logs.
    <match kubernetes.**>
      @type google_cloud

      # Try to detect JSON formatted log entries.
      detect_json true
      # Collect metrics in Prometheus registry about plugin activity.
      enable_monitoring true
      monitoring_type prometheus
      # Allow log entries from multiple containers to be sent in the same request.
      split_logs_by_tag false
      # Set the buffer type to file to improve the reliability and reduce the memory consumption
      buffer_type file
      buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
      # Set queue_full action to block because we want to pause gracefully
      # in case of the off-the-limits load instead of throwing an exception
      buffer_queue_full_action block
      # Set the chunk limit conservatively to avoid exceeding the recommended
      # chunk size of 5MB per write request.
      buffer_chunk_limit 1M
      # Cap the combined memory usage of this buffer and the one below to
      # 1MiB/chunk * (6 + 2) chunks = 8 MiB
      buffer_queue_limit 6
      # Never wait more than 5 seconds before flushing logs in the non-error case.
      flush_interval 5s
      # Never wait longer than 30 seconds between retries.
      max_retry_wait 30
      # Disable the limit on the number of retries (retry forever).
      disable_retry_limit
      # Use multiple threads for processing.
      num_threads 2
      use_grpc true
    </match>

    # Keep a smaller buffer here since these logs are less important than the user's
    # container logs.
    <match **>
      @type google_cloud

      detect_json true
      enable_monitoring true
      monitoring_type prometheus
      # Allow entries from multiple system logs to be sent in the same request.
      split_logs_by_tag false
      detect_subservice false
      buffer_type file
      buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
      buffer_queue_full_action block
      buffer_chunk_limit 1M
      buffer_queue_limit 2
      flush_interval 5s
      max_retry_wait 30
      disable_retry_limit
      num_threads 2
      use_grpc true
    </match>
metadata:
  name: fluentd-gcp-config-old-v1.2.5
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
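The [Trimmed] guard in output.conf above is a Ruby ternary inside record_transformer. As a minimal sketch of the same logic in Go — assuming byte-length truncation is acceptable, which the Ruby version only approximates for multi-byte text — it comes down to:

package main

import "fmt"

const maxEntry = 100000 // Stackdriver rejects entries over ~100KB

// trimEntry mirrors the record_transformer guard above: oversized payloads
// are truncated and prefixed with "[Trimmed]" instead of being dropped.
func trimEntry(log string) string {
	if len(log) > maxEntry {
		return "[Trimmed]" + log[:maxEntry] + "..."
	}
	return log
}

func main() {
	fmt.Println(trimEntry("short entry")) // printed unchanged
}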
158
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml
generated
vendored
@ -1,3 +1,9 @@
# This ConfigMap is used to ingest logs against new resources like
# "k8s_container" and "k8s_node" when $LOGGING_STACKDRIVER_RESOURCE_TYPES is set
# to "new".
# When $LOGGING_STACKDRIVER_RESOURCE_TYPES is set to "old", the ConfigMap in
# fluentd-gcp-configmap-old.yaml will be used for ingesting logs against old
# resources like "gke_container" and "gce_instance".
kind: ConfigMap
apiVersion: v1
data:
@ -17,29 +23,34 @@ data:
    # "stream":"stderr",
    # "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The record reformer is used to write the tag to focus on the pod name
    # and the Kubernetes container name. For example a Docker container's logs
    # might be in the directory:
    # The original tag is derived from the log file's location.
    # For example a Docker container's logs might be in the directory:
    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    # and in the file:
    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
    # in the /var/log/containers directory which includes the pod name and the Kubernetes
    # container name:
    #  synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # The Kubernetes kubelet makes a symbolic link to this file on the host
    # machine in the /var/log/containers directory which includes the pod name,
    # the namespace name and the Kubernetes container name:
    #  synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #  ->
    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd and we end up collecting the file:
    #  /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #  /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # This results in the tag:
    #  var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # The record reformer is used to discard the var.log.containers prefix and
    # the Docker container ID suffix and "kubernetes." is pre-pended giving the tag:
    #  kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr
    # Tag is then parsed by google_cloud plugin and translated to the metadata,
    # visible in the log viewer
    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # where 'synthetic-logger-0.25lps-pod' is the pod name, 'default' is the
    # namespace name, 'synth-lgr' is the container name and '997599971ee6..' is
    # the container ID.
    # The record reformer is used to extract pod_name, namespace_name and
    # container_name from the tag and set them in a local_resource_id in the
    # format of:
    # 'k8s_container.<NAMESPACE_NAME>.<POD_NAME>.<CONTAINER_NAME>'.
    # The reformer also changes the tags to 'stderr' or 'stdout' based on the
    # value of 'stream'.
    # local_resource_id is later used by google_cloud plugin to determine the
    # monitored resource to ingest logs against.

    # Json Log Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
@ -49,28 +60,22 @@ data:
      @type tail
      path /var/log/containers/*.log
      pos_file /var/log/gcp-containers.log.pos
      # Tags at this point are in the format of:
      # reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
      tag reform.*
      read_from_head true
      format none
      format multi_format
      <pattern>
        format json
        time_key time
        time_format %Y-%m-%dT%H:%M:%S.%NZ
      </pattern>
      <pattern>
        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
        time_format %Y-%m-%dT%H:%M:%S.%N%:z
      </pattern>
    </source>

    <filter reform.**>
      @type parser
      key_name message
      <parse>
        @type multi_format
        <pattern>
          format json
          time_key time
          time_format %Y-%m-%dT%H:%M:%S.%NZ
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </filter>

    <filter reform.**>
      @type parser
      format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
@ -83,11 +88,23 @@ data:
    <match reform.**>
      @type record_reformer
      enable_ruby true
      tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
      <record>
        # Extract local_resource_id from tag for 'k8s_container' monitored
        # resource. The format is:
        # 'k8s_container.<namespace_name>.<pod_name>.<container_name>'.
        "logging.googleapis.com/local_resource_id" ${"k8s_container.#{tag_suffix[4].rpartition('.')[0].split('_')[1]}.#{tag_suffix[4].rpartition('.')[0].split('_')[0]}.#{tag_suffix[4].rpartition('.')[0].split('_')[2].rpartition('-')[0]}"}
        # Rename the field 'log' to a more generic field 'message'. This way the
        # fluent-plugin-google-cloud knows to flatten the field as textPayload
        # instead of jsonPayload after extracting 'time', 'severity' and
        # 'stream' from the record.
        message ${record['log']}
      </record>
      tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
      remove_keys stream,log
    </match>

    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.kubernetes.**>
    <match {raw.stderr,raw.stdout}>
      @type detect_exceptions

      remove_tag_prefix raw
@ -282,6 +299,25 @@ data:
      read_from_head true
      tag node-problem-detector
    </source>

    # BEGIN_NODE_JOURNAL
    # Whether to include node-journal or not is determined when starting the
    # cluster. It is not changed when the cluster is already running.
    <source>
      @type systemd
      pos_file /var/log/gcp-journald.pos
      read_from_head true
      tag node-journal
    </source>

    <filter node-journal>
      @type grep
      <exclude>
        key _SYSTEMD_UNIT
        pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
      </exclude>
    </filter>
    # END_NODE_JOURNAL
  monitoring.conf: |-
    # This source is used to acquire the approximate process start timestamp,
    # whose purpose is explained before the corresponding output plugin.
@ -331,23 +367,30 @@ data:
      </metric>
    </filter>

    # This section is exclusive for k8s_container logs. Those come with
    # 'stderr'/'stdout' tags.
    # TODO(instrumentation): Reconsider this workaround later.
    # Trim the entries which exceed slightly less than 100KB, to avoid
    # dropping them. It is a necessity, because Stackdriver only supports
    # entries that are up to 100KB in size.
    <filter kubernetes.**>
    <filter {stderr,stdout}>
      @type record_transformer
      enable_ruby true
      <record>
        log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
        message ${record['message'].length > 100000 ? "[Trimmed]#{record['message'][0..100000]}..." : record['message']}
      </record>
    </filter>

    # We use 2 output stanzas - one to handle the container logs and one to handle
    # the node daemon logs, the latter of which explicitly sends its logs to the
    # compute.googleapis.com service rather than container.googleapis.com to keep
    # them separate since most users don't care about the node logs.
    <match kubernetes.**>
    # Do not collect fluentd's own logs to avoid infinite loops.
    <match fluent.**>
      @type null
    </match>

    # This section is exclusive for k8s_container logs. These logs come with
    # 'stderr'/'stdout' tags.
    # We use a separate output stanza for 'k8s_node' logs with a smaller buffer
    # because node logs are less important than user's container logs.
    <match {stderr,stdout}>
      @type google_cloud

      # Try to detect JSON formatted log entries.
@ -377,15 +420,24 @@ data:
      disable_retry_limit
      # Use multiple threads for processing.
      num_threads 2
      labels {
        # The logging backend will take responsibility for double writing to
        # the necessary resource types when this label is set.
        "logging.googleapis.com/k8s_compatibility": "true"
      }
      use_grpc true
      # Use Metadata Agent to get monitored resource.
      enable_metadata_agent true
    </match>

    # Keep a smaller buffer here since these logs are less important than the user's
    # container logs.
    # Attach local_resource_id for 'k8s_node' monitored resource.
    <filter **>
      @type record_transformer
      enable_ruby true
      <record>
        "logging.googleapis.com/local_resource_id" ${"k8s_node.#{ENV['NODE_NAME']}"}
      </record>
    </filter>

    # This section is exclusive for 'k8s_node' logs. These logs come with tags
    # that are neither 'stderr' nor 'stdout'.
    # We use a separate output stanza for 'k8s_container' logs with a larger
    # buffer because user's container logs are more important than node logs.
    <match **>
      @type google_cloud

@ -404,14 +456,12 @@ data:
      max_retry_wait 30
      disable_retry_limit
      num_threads 2
      labels {
        # The logging backend will take responsibility for double writing to
        # the necessary resource types when this label is set.
        "logging.googleapis.com/k8s_compatibility": "true"
      }
      use_grpc true
      # Use Metadata Agent to get monitored resource.
      enable_metadata_agent true
    </match>
metadata:
  name: fluentd-gcp-config-v1.2.4
  name: fluentd-gcp-config-v1.2.5
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
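The record_reformer expression above packs three rpartition/split calls into one line. A sketch of the same derivation in Go — illustrative only; the real transformation is the Ruby expression in the ConfigMap — may make the tag anatomy easier to follow:

package main

import (
	"fmt"
	"strings"
)

// localResourceID derives "k8s_container.<namespace>.<pod>.<container>" from
// the basename of a /var/log/containers symlink, which has the shape
// "<pod>_<namespace>_<container>-<container-id>.log".
func localResourceID(base string) string {
	name := strings.TrimSuffix(base, ".log")
	parts := strings.SplitN(name, "_", 3) // pod, namespace, container-<id>
	if len(parts) != 3 {
		return ""
	}
	container := parts[2]
	if i := strings.LastIndex(container, "-"); i >= 0 {
		container = container[:i] // drop the container ID suffix
	}
	return fmt.Sprintf("k8s_container.%s.%s.%s", parts[1], parts[0], container)
}

func main() {
	fmt.Println(localResourceID(
		"synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6.log"))
	// Output: k8s_container.default.synthetic-logger-0.25lps-pod.synth-lgr
}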
24
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml
generated
vendored
@ -35,11 +35,16 @@ spec:
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: libsystemddir
          mountPath: /host/lib
          readOnly: true
        - name: config-volume
          mountPath: /etc/google-fluentd/config.d
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: STACKDRIVER_METADATA_AGENT_URL
          value: http://$(NODE_NAME):8799
        # Liveness probe is aimed to help in situations where fluentd
        # silently hangs for no apparent reason until manual restart.
        # The idea of this probe is that if fluentd is not queueing or
@ -61,14 +66,14 @@ spec:
            then
              exit 1;
            fi;
            LAST_MODIFIED_DATE=`stat /var/log/fluentd-buffers | grep Modify | sed -r "s/Modify: (.*)/\1/"`;
            LAST_MODIFIED_TIMESTAMP=`date -d "$LAST_MODIFIED_DATE" +%s`;
            if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $STUCK_THRESHOLD_SECONDS` ];
            touch -d "${STUCK_THRESHOLD_SECONDS} seconds ago" /tmp/marker-stuck;
            if [[ -z "$(find /var/log/fluentd-buffers -type f -newer /tmp/marker-stuck -print -quit)" ]];
            then
              rm -rf /var/log/fluentd-buffers;
              exit 1;
            fi;
            if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $LIVENESS_THRESHOLD_SECONDS` ];
            touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
            if [[ -z "$(find /var/log/fluentd-buffers -type f -newer /tmp/marker-liveness -print -quit)" ]];
            then
              exit 1;
            fi;
@ -109,9 +114,6 @@ spec:
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: libsystemddir
        hostPath:
          path: /usr/lib64
      - name: config-volume
        configMap:
          name: fluentd-gcp-config-v1.2.4
          name: {{ fluentd_gcp_configmap_name }}-v1.2.5
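The reworked liveness probe above decides that fluentd is stuck by touching a marker file dated the threshold number of seconds in the past and asking find for anything newer. A hedged Go equivalent of that freshness test — the threshold value here is chosen purely for illustration, while the manifest takes it from $STUCK_THRESHOLD_SECONDS and $LIVENESS_THRESHOLD_SECONDS — looks like:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// buffersFresh reports whether any file under dir was modified within
// threshold -- the question the probe's `find -newer marker` answers.
func buffersFresh(dir string, threshold time.Duration) bool {
	cutoff := time.Now().Add(-threshold)
	fresh := false
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err == nil && !info.IsDir() && info.ModTime().After(cutoff) {
			fresh = true
		}
		return nil
	})
	return fresh
}

func main() {
	if !buffersFresh("/var/log/fluentd-buffers", 15*time.Minute) {
		fmt.Println("buffers look stuck; the probe would wipe them and fail")
		os.Exit(1)
	}
}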
@ -8,7 +8,7 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  - policy
  resourceNames:
  - gce.event-exporter
  resources:

@ -1,11 +1,11 @@
apiVersion: extensions/v1beta1
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.event-exporter
  annotations:
    kubernetes.io/description: 'Policy used by the event-exporter addon.'
    # TODO: event-exporter should run with the default seccomp profile
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
@ -8,7 +8,7 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  - policy
  resourceNames:
  - gce.fluentd-gcp
  resources:

@ -1,11 +1,11 @@
apiVersion: extensions/v1beta1
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.fluentd-gcp
  annotations:
    kubernetes.io/description: 'Policy used by the fluentd-gcp addon.'
    # TODO: fluentd-gcp should run with the default seccomp profile
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
8
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/scaler-deployment.yaml
generated
vendored
@ -5,7 +5,7 @@ metadata:
  namespace: kube-system
  labels:
    k8s-app: fluentd-gcp-scaler
    version: v0.1.0
    version: v0.3.0
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
@ -19,7 +19,7 @@ spec:
      serviceAccountName: fluentd-gcp-scaler
      containers:
        - name: fluentd-gcp-scaler
          image: gcr.io/google-containers/fluentd-gcp-scaler:0.1
          image: k8s.gcr.io/fluentd-gcp-scaler:0.3
          command:
            - /scaler.sh
            - --ds-name=fluentd-gcp-v3.0.0
@ -30,5 +30,7 @@ spec:
              value: 100m
            - name: MEMORY_REQUEST
              value: 200Mi
            - name: CPU_LIMIT
              value: 1000m
            - name: MEMORY_LIMIT
              value: 300Mi
              value: 500Mi
5468
vendor/k8s.io/kubernetes/cluster/addons/istio/auth/istio-auth.yaml
generated
vendored
File diff suppressed because it is too large
5455
vendor/k8s.io/kubernetes/cluster/addons/istio/noauth/istio.yaml
generated
vendored
File diff suppressed because it is too large
2
vendor/k8s.io/kubernetes/cluster/addons/kube-proxy/kube-proxy-ds.yaml
generated
vendored
@ -24,7 +24,7 @@ spec:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      {{pod_priority}}
      priorityClassName: system-node-critical
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/kube-proxy-ds-ready: "true"
@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metadata-agent
  name: stackdriver:metadata-agent
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
@ -20,14 +20,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metadata-agent
  name: stackdriver:metadata-agent
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metadata-agent
  name: stackdriver:metadata-agent
subjects:
- kind: ServiceAccount
  name: metadata-agent
106
vendor/k8s.io/kubernetes/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml
generated
vendored
@ -7,6 +7,22 @@ metadata:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metadata-agent-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  node_level.conf: |-
    KubernetesUseWatch: true
    KubernetesClusterLevelMetadata: false
  cluster_level.conf: |-
    KubernetesUseWatch: true
    KubernetesClusterLevelMetadata: true
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
@ -24,12 +40,32 @@ spec:
    metadata:
      labels:
        app: metadata-agent
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: metadata-agent
      containers:
        - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:{{ metadata_agent_version }}
        - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
          imagePullPolicy: IfNotPresent
          name: metadata-agent
          livenessProbe:
            exec:
              command:
              - /bin/bash
              - -c
              - |
                if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
                  exit 1;
                fi
            periodSeconds: 10
            failureThreshold: 1
            successThreshold: 1
          volumeMounts:
            - name: metadata-agent-config-volume
              mountPath: /etc/config
          command:
            - /opt/stackdriver/metadata/sbin/metadatad
            - --config-file=/etc/config/node_level.conf
          ports:
            - containerPort: 8000
              hostPort: 8799
@ -42,7 +78,75 @@ spec:
      restartPolicy: Always
      schedulerName: default-scheduler
      terminationGracePeriodSeconds: 30
      volumes:
        - name: metadata-agent-config-volume
          configMap:
            name: metadata-agent-config
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
---
kind: Deployment
apiVersion: apps/v1beta1
metadata:
  labels:
    app: metadata-agent-cluster-level
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: metadata-agent-cluster-level
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: metadata-agent-cluster-level
  template:
    metadata:
      labels:
        app: metadata-agent-cluster-level
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: metadata-agent
      containers:
        - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
          imagePullPolicy: IfNotPresent
          name: metadata-agent
          livenessProbe:
            exec:
              command:
              - /bin/bash
              - -c
              - |
                if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
                  exit 1;
                fi
            periodSeconds: 10
            failureThreshold: 1
            successThreshold: 1
          volumeMounts:
            - name: metadata-agent-config-volume
              mountPath: /etc/config
          command:
            - /opt/stackdriver/metadata/sbin/metadatad
            - --config-file=/etc/config/cluster_level.conf
          ports:
            - containerPort: 8000
              protocol: TCP
          resources:
            requests:
              cpu: {{ metadata_agent_cluster_level_cpu_request }}
              memory: {{ metadata_agent_cluster_level_memory_request }}
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      terminationGracePeriodSeconds: 30
      volumes:
        - name: metadata-agent-config-volume
          configMap:
            name: metadata-agent-config
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
5
vendor/k8s.io/kubernetes/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml
generated
vendored
@ -37,6 +37,11 @@ spec:
      serviceAccountName: metadata-proxy
      hostNetwork: true
      dnsPolicy: Default
      tolerations:
      - operator: "Exists"
        effect: "NoExecute"
      - operator: "Exists"
        effect: "NoSchedule"
      containers:
      - name: metadata-proxy
        image: k8s.gcr.io/metadata-proxy:v0.1.9
20
vendor/k8s.io/kubernetes/cluster/addons/metrics-server/README.md
generated
vendored
Normal file
@ -0,0 +1,20 @@
# Metrics Server

[Metrics Server](https://github.com/kubernetes-incubator/metrics-server) exposes
core Kubernetes metrics via the metrics API.

More details can be found in the [Core metrics pipeline documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/).

## Troubleshooting

Metrics Server supports up to 30 pods per cluster node. In clusters with more running pods, Metrics Server may be throttled or fail with an OOM error. Starting with Kubernetes 1.9.2, Metrics Server resource requirements may be overwritten manually. [Learn more about Addon Resizer configuration](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer#addon-resizer-configuration)

### Important notices

Decreasing resource requirements for cluster addons may cause system instability. The effects may include (but are not limited to):
- Horizontal Pod Autoscaler not working
- `kubectl top` not working (starting with Kubernetes 1.10)

Overwritten configuration persists through cluster updates and can therefore cause all of the effects above after a cluster update.
@ -43,6 +43,7 @@ spec:
        version: v0.2.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
2
vendor/k8s.io/kubernetes/cluster/addons/node-problem-detector/npd.yaml
generated
vendored
@ -48,7 +48,7 @@ spec:
          - "/bin/sh"
          - "-c"
          # Pass both config to support both journald and syslog.
          - "/node-problem-detector --logtostderr --system-log-monitors=/config/kernel-monitor.json,/config/kernel-monitor-filelog.json,/config/docker-monitor.json,/config/docker-monitor-filelog.json >>/var/log/node-problem-detector.log 2>&1"
          - "exec /node-problem-detector --logtostderr --system-log-monitors=/config/kernel-monitor.json,/config/kernel-monitor-filelog.json,/config/docker-monitor.json,/config/docker-monitor-filelog.json >>/var/log/node-problem-detector.log 2>&1"
        securityContext:
          privileged: true
        resources:
11
vendor/k8s.io/kubernetes/cluster/addons/prometheus/OWNERS
generated
vendored
Normal file
@ -0,0 +1,11 @@
approvers:
- kawych
- piosz
- serathius
- brancz
reviewers:
- kawych
- piosz
- serathius
- brancz
5
vendor/k8s.io/kubernetes/cluster/addons/prometheus/README.md
generated
vendored
Normal file
@ -0,0 +1,5 @@
# Prometheus Add-on

This add-on is an experimental configuration of k8s monitoring with Prometheus, used for e2e tests.

For production use, check out more mature setups like [Prometheus Operator](https://github.com/coreos/prometheus-operator) and [kube-prometheus](https://github.com/coreos/prometheus-operator/tree/master/contrib/kube-prometheus).
18
vendor/k8s.io/kubernetes/cluster/addons/prometheus/alertmanager-configmap.yaml
generated
vendored
Normal file
@ -0,0 +1,18 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  alertmanager.yml: |
    global: null
    receivers:
    - name: default-receiver
    route:
      group_interval: 5m
      group_wait: 10s
      receiver: default-receiver
      repeat_interval: 3h
78
vendor/k8s.io/kubernetes/cluster/addons/prometheus/alertmanager-deployment.yaml
generated
vendored
Normal file
@ -0,0 +1,78 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: alertmanager
  namespace: kube-system
  labels:
    k8s-app: alertmanager
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.14.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: alertmanager
      version: v0.14.0
  template:
    metadata:
      labels:
        k8s-app: alertmanager
        version: v0.14.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: prometheus-alertmanager
          image: "prom/alertmanager:v0.14.0"
          imagePullPolicy: "IfNotPresent"
          args:
            - --config.file=/etc/config/alertmanager.yml
            - --storage.path=/data
            - --web.external-url=/
          ports:
            - containerPort: 9093
          readinessProbe:
            httpGet:
              path: /#/status
              port: 9093
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: storage-volume
              mountPath: "/data"
              subPath: ""
          resources:
            limits:
              cpu: 10m
              memory: 50Mi
            requests:
              cpu: 10m
              memory: 50Mi
        - name: prometheus-alertmanager-configmap-reload
          image: "jimmidyson/configmap-reload:v0.1"
          imagePullPolicy: "IfNotPresent"
          args:
            - --volume-dir=/etc/config
            - --webhook-url=http://localhost:9093/-/reload
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
              readOnly: true
          resources:
            limits:
              cpu: 10m
              memory: 10Mi
            requests:
              cpu: 10m
              memory: 10Mi
      volumes:
        - name: config-volume
          configMap:
            name: alertmanager-config
        - name: storage-volume
          persistentVolumeClaim:
            claimName: alertmanager
15
vendor/k8s.io/kubernetes/cluster/addons/prometheus/alertmanager-pvc.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: alertmanager
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  storageClassName: standard
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "2Gi"
18
vendor/k8s.io/kubernetes/cluster/addons/prometheus/alertmanager-service.yaml
generated
vendored
Normal file
@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: alertmanager
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Alertmanager"
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 9093
  selector:
    k8s-app: alertmanager
  type: "ClusterIP"
67
vendor/k8s.io/kubernetes/cluster/addons/prometheus/kube-state-metrics-deployment.yaml
generated
vendored
Normal file
@ -0,0 +1,67 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    k8s-app: kube-state-metrics
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.3.0
spec:
  selector:
    matchLabels:
      k8s-app: kube-state-metrics
      version: v1.3.0
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: kube-state-metrics
        version: v1.3.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: kube-state-metrics
      containers:
      - name: kube-state-metrics
        image: quay.io/coreos/kube-state-metrics:v1.3.0
        ports:
        - name: http-metrics
          containerPort: 8080
        - name: telemetry
          containerPort: 8081
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 5
      - name: addon-resizer
        image: k8s.gcr.io/addon-resizer:1.7
        resources:
          limits:
            cpu: 100m
            memory: 30Mi
          requests:
            cpu: 100m
            memory: 30Mi
        env:
          - name: MY_POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: MY_POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
        command:
          - /pod_nanny
          - --container=kube-state-metrics
          - --cpu=100m
          - --extra-cpu=1m
          - --memory=100Mi
          - --extra-memory=2Mi
          - --threshold=5
          - --deployment=kube-state-metrics
103
vendor/k8s.io/kubernetes/cluster/addons/prometheus/kube-state-metrics-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,103 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-state-metrics
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs: ["list", "watch"]
- apiGroups: ["extensions"]
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs: ["list", "watch"]
- apiGroups: ["apps"]
  resources:
  - statefulsets
  verbs: ["list", "watch"]
- apiGroups: ["batch"]
  resources:
  - cronjobs
  - jobs
  verbs: ["list", "watch"]
- apiGroups: ["autoscaling"]
  resources:
  - horizontalpodautoscalers
  verbs: ["list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kube-state-metrics-resizer
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs: ["get"]
- apiGroups: ["extensions"]
  resources:
  - deployments
  resourceNames: ["kube-state-metrics"]
  verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-state-metrics
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kube-state-metrics-resizer
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
  namespace: kube-system
23
vendor/k8s.io/kubernetes/cluster/addons/prometheus/kube-state-metrics-service.yaml
generated
vendored
Normal file
@ -0,0 +1,23 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "kube-state-metrics"
  annotations:
    prometheus.io/scrape: 'true'
spec:
  ports:
  - name: http-metrics
    port: 8080
    targetPort: http-metrics
    protocol: TCP
  - name: telemetry
    port: 8081
    targetPort: telemetry
    protocol: TCP
  selector:
    k8s-app: kube-state-metrics
56
vendor/k8s.io/kubernetes/cluster/addons/prometheus/node-exporter-ds.yml
generated
vendored
Normal file
@ -0,0 +1,56 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    k8s-app: node-exporter
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.15.2
spec:
  updateStrategy:
    type: OnDelete
  template:
    metadata:
      labels:
        k8s-app: node-exporter
        version: v0.15.2
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-node-critical
      containers:
        - name: prometheus-node-exporter
          image: "prom/node-exporter:v0.15.2"
          imagePullPolicy: "IfNotPresent"
          args:
            - --path.procfs=/host/proc
            - --path.sysfs=/host/sys
          ports:
            - name: metrics
              containerPort: 9100
              hostPort: 9100
          volumeMounts:
            - name: proc
              mountPath: /host/proc
              readOnly: true
            - name: sys
              mountPath: /host/sys
              readOnly: true
          resources:
            limits:
              cpu: 10m
              memory: 50Mi
            requests:
              cpu: 10m
              memory: 50Mi
      hostNetwork: true
      hostPID: true
      volumes:
        - name: proc
          hostPath:
            path: /proc
        - name: sys
          hostPath:
            path: /sys
20
vendor/k8s.io/kubernetes/cluster/addons/prometheus/node-exporter-service.yaml
generated
vendored
Normal file
@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  name: node-exporter
  namespace: kube-system
  annotations:
    prometheus.io/scrape: "true"
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "NodeExporter"
spec:
  clusterIP: None
  ports:
    - name: metrics
      port: 9100
      protocol: TCP
      targetPort: 9100
  selector:
    k8s-app: node-exporter
171
vendor/k8s.io/kubernetes/cluster/addons/prometheus/prometheus-configmap.yaml
generated
vendored
Normal file
@ -0,0 +1,171 @@
# Prometheus configuration format https://prometheus.io/docs/prometheus/latest/configuration/configuration/
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  prometheus.yml: |
    scrape_configs:
    - job_name: prometheus
      static_configs:
      - targets:
        - localhost:9090

    - job_name: kubernetes-apiservers
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - action: keep
        regex: default;kubernetes;https
        source_labels:
        - __meta_kubernetes_namespace
        - __meta_kubernetes_service_name
        - __meta_kubernetes_endpoint_port_name
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

    - job_name: kubernetes-nodes-kubelet
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

    - job_name: kubernetes-nodes-cadvisor
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __metrics_path__
        replacement: /metrics/cadvisor
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

    - job_name: kubernetes-service-endpoints
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - action: keep
        regex: true
        source_labels:
        - __meta_kubernetes_service_annotation_prometheus_io_scrape
      - action: replace
        regex: (https?)
        source_labels:
        - __meta_kubernetes_service_annotation_prometheus_io_scheme
        target_label: __scheme__
      - action: replace
        regex: (.+)
        source_labels:
        - __meta_kubernetes_service_annotation_prometheus_io_path
        target_label: __metrics_path__
      - action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        source_labels:
        - __address__
        - __meta_kubernetes_service_annotation_prometheus_io_port
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - action: replace
        source_labels:
        - __meta_kubernetes_namespace
        target_label: kubernetes_namespace
      - action: replace
        source_labels:
        - __meta_kubernetes_service_name
        target_label: kubernetes_name

    - job_name: kubernetes-services
      kubernetes_sd_configs:
      - role: service
      metrics_path: /probe
      params:
        module:
        - http_2xx
      relabel_configs:
      - action: keep
        regex: true
        source_labels:
        - __meta_kubernetes_service_annotation_prometheus_io_probe
      - source_labels:
        - __address__
        target_label: __param_target
      - replacement: blackbox
        target_label: __address__
      - source_labels:
        - __param_target
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels:
        - __meta_kubernetes_namespace
        target_label: kubernetes_namespace
      - source_labels:
        - __meta_kubernetes_service_name
        target_label: kubernetes_name

    - job_name: kubernetes-pods
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - action: keep
        regex: true
        source_labels:
        - __meta_kubernetes_pod_annotation_prometheus_io_scrape
      - action: replace
        regex: (.+)
        source_labels:
        - __meta_kubernetes_pod_annotation_prometheus_io_path
        target_label: __metrics_path__
      - action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        source_labels:
        - __address__
        - __meta_kubernetes_pod_annotation_prometheus_io_port
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - action: replace
        source_labels:
        - __meta_kubernetes_namespace
        target_label: kubernetes_namespace
      - action: replace
        source_labels:
        - __meta_kubernetes_pod_name
        target_label: kubernetes_pod_name
    alerting:
      alertmanagers:
      - kubernetes_sd_configs:
        - role: pod
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        relabel_configs:
        - source_labels: [__meta_kubernetes_namespace]
          regex: kube-system
          action: keep
        - source_labels: [__meta_kubernetes_pod_label_k8s_app]
          regex: alertmanager
          action: keep
        - source_labels: [__meta_kubernetes_pod_container_port_number]
          regex:
          action: drop
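The `__address__` rewrite in the relabel rules above relies on Prometheus joining the source_labels with ";" before matching. A small Go check of the same regex — illustrative only; Prometheus applies this internally — shows the effect:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Prometheus joins source_labels with ";" before matching, so an address
	// of "10.0.0.5:443" plus a prometheus.io/port annotation of "9102"
	// becomes the input "10.0.0.5:443;9102".
	re := regexp.MustCompile(`([^:]+)(?::\d+)?;(\d+)`)
	fmt.Println(re.ReplaceAllString("10.0.0.5:443;9102", "$1:$2"))
	// Output: 10.0.0.5:9102 -- the annotated port replaces the original one.
}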
55
vendor/k8s.io/kubernetes/cluster/addons/prometheus/prometheus-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,55 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: prometheus
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  - nodes/metrics
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
- nonResourceURLs:
  - "/metrics"
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: kube-system
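The ClusterRole above grants read-only access to the core objects the scrape jobs discover (nodes, services, endpoints, pods) plus the /metrics non-resource URL. As a sketch of how to sanity-check the binding, a SubjectAccessReview can be posted for the username RBAC derives from the ServiceAccount (the verb/resource pair here is just an example):

apiVersion: authorization.k8s.io/v1
kind: SubjectAccessReview
spec:
  # RBAC authorizes the ServiceAccount under this derived username
  user: system:serviceaccount:kube-system:prometheus
  resourceAttributes:
    verb: list
    resource: pods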
17
vendor/k8s.io/kubernetes/cluster/addons/prometheus/prometheus-service.yaml
generated
vendored
Normal file
17
vendor/k8s.io/kubernetes/cluster/addons/prometheus/prometheus-service.yaml
generated
vendored
Normal file
@ -0,0 +1,17 @@
kind: Service
apiVersion: v1
metadata:
  name: prometheus
  namespace: kube-system
  labels:
    kubernetes.io/name: "Prometheus"
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  ports:
  - name: http
    port: 9090
    protocol: TCP
    targetPort: 9090
  selector:
    k8s-app: prometheus
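The Service declares no explicit type, so it defaults to ClusterIP and is reachable in-cluster at http://prometheus.kube-system.svc:9090. A minimal sketch of a client resolving it through that DNS name (the Pod below is hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: prometheus-ready-check        # hypothetical
  namespace: kube-system
spec:
  restartPolicy: Never
  containers:
  - name: check
    image: busybox:latest
    # hit the readiness endpoint through the Service's cluster DNS name
    command: ["wget", "-qO-", "http://prometheus.kube-system.svc:9090/-/ready"]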
109
vendor/k8s.io/kubernetes/cluster/addons/prometheus/prometheus-statefulset.yaml
generated
vendored
Normal file
109
vendor/k8s.io/kubernetes/cluster/addons/prometheus/prometheus-statefulset.yaml
generated
vendored
Normal file
@ -0,0 +1,109 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: prometheus
  namespace: kube-system
  labels:
    k8s-app: prometheus
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v2.2.1
spec:
  serviceName: "prometheus"
  replicas: 1
  podManagementPolicy: "Parallel"
  updateStrategy:
    type: "RollingUpdate"
  selector:
    matchLabels:
      k8s-app: prometheus
  template:
    metadata:
      labels:
        k8s-app: prometheus
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: prometheus
      initContainers:
      - name: "init-chown-data"
        image: "busybox:latest"
        imagePullPolicy: "IfNotPresent"
        # The Prometheus image runs as nobody (65534); make the data dir writable.
        command: ["chown", "-R", "65534:65534", "/data"]
        volumeMounts:
        - name: prometheus-data
          mountPath: /data
          subPath: ""
      containers:
      # Sidecar that POSTs to the reload endpoint whenever the mounted ConfigMap changes.
      - name: prometheus-server-configmap-reload
        image: "jimmidyson/configmap-reload:v0.1"
        imagePullPolicy: "IfNotPresent"
        args:
        - --volume-dir=/etc/config
        - --webhook-url=http://localhost:9090/-/reload
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config
          readOnly: true
        resources:
          limits:
            cpu: 10m
            memory: 10Mi
          requests:
            cpu: 10m
            memory: 10Mi
      - name: prometheus-server
        image: "prom/prometheus:v2.2.1"
        imagePullPolicy: "IfNotPresent"
        args:
        - --config.file=/etc/config/prometheus.yml
        - --storage.tsdb.path=/data
        - --web.console.libraries=/etc/prometheus/console_libraries
        - --web.console.templates=/etc/prometheus/consoles
        # --web.enable-lifecycle exposes the /-/reload endpoint the sidecar calls.
        - --web.enable-lifecycle
        ports:
        - containerPort: 9090
        readinessProbe:
          httpGet:
            path: /-/ready
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
        livenessProbe:
          httpGet:
            path: /-/healthy
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
        # based on 10 running nodes with 30 pods each
        resources:
          limits:
            cpu: 200m
            memory: 1000Mi
          requests:
            cpu: 200m
            memory: 1000Mi
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config
        - name: prometheus-data
          mountPath: /data
          subPath: ""
      terminationGracePeriodSeconds: 300
      volumes:
      - name: config-volume
        configMap:
          name: prometheus-config
  volumeClaimTemplates:
  - metadata:
      name: prometheus-data
    spec:
      storageClassName: standard
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: "16Gi"
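Because storage comes from a volumeClaimTemplate, each replica gets its own PersistentVolumeClaim named <template>-<statefulset>-<ordinal>. For replica 0, the claim the controller creates is equivalent to this sketch:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-data-prometheus-0  # <template>-<statefulset>-<ordinal>
  namespace: kube-system
spec:
  storageClassName: standard
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: "16Gi"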
68
vendor/k8s.io/kubernetes/cluster/addons/rbac/cluster-autoscaler/cluster-autoscaler-rbac.yaml
generated
vendored
Normal file
68
vendor/k8s.io/kubernetes/cluster/addons/rbac/cluster-autoscaler/cluster-autoscaler-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,68 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cluster-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
# leader election
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["endpoints"]
  resourceNames: ["cluster-autoscaler"]
  verbs: ["get", "update", "patch", "delete"]
# accessing & modifying cluster state (nodes & pods)
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
# read-only access to cluster state
- apiGroups: [""]
  resources: ["services", "replicationcontrollers", "persistentvolumes", "persistentvolumeclaims"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps", "extensions"]
  resources: ["daemonsets", "replicasets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
# misc access
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["cluster-autoscaler-status"]
  verbs: ["get", "update", "patch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cluster-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: User
  name: cluster-autoscaler
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-autoscaler
  apiGroup: rbac.authorization.k8s.io