mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)

Commit: Fresh dep ensure
11  vendor/k8s.io/kubernetes/cluster/addons/addon-manager/CHANGELOG.md  (generated, vendored)
@@ -1,3 +1,14 @@
+### Version 8.9 (Fri October 19 2018 Jeff Grafton <jgrafton@google.com>)
+- Update to use debian-base:0.4.0.
+- Update kubectl to v1.11.3.
+
+### Version 8.8 (Mon October 1 2018 Zihong Zheng <zihongz@google.com>)
+- Update to use debian-base:0.3.2.
+
+### Version 8.7 (Tue September 4 2018 Zihong Zheng <zihongz@google.com>)
+- Support extra `--prune-whitelist` resources in kube-addon-manager.
+- Update kubectl to v1.10.7.
+
 ### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
 - Allow reconcile/ensure loop to work with resource under non-kube-system namespace.
 - Update kubectl to v1.9.3.
2  vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Dockerfile  (generated, vendored)
@@ -14,6 +14,8 @@
 FROM BASEIMAGE

+RUN clean-install bash
+
 ADD kube-addons.sh /opt/
 ADD namespace.yaml /opt/
 ADD kubectl /usr/local/bin/
20  vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Makefile  (generated, vendored)
@@ -15,24 +15,10 @@
 IMAGE=staging-k8s.gcr.io/kube-addon-manager
 ARCH?=amd64
 TEMP_DIR:=$(shell mktemp -d)
-VERSION=v8.6
-KUBECTL_VERSION?=v1.9.3
+VERSION=v8.9
+KUBECTL_VERSION?=v1.11.3

-ifeq ($(ARCH),amd64)
-  BASEIMAGE?=bashell/alpine-bash
-endif
-ifeq ($(ARCH),arm)
-  BASEIMAGE?=arm32v7/debian
-endif
-ifeq ($(ARCH),arm64)
-  BASEIMAGE?=arm64v8/debian
-endif
-ifeq ($(ARCH),ppc64le)
-  BASEIMAGE?=ppc64le/debian
-endif
-ifeq ($(ARCH),s390x)
-  BASEIMAGE?=s390x/debian
-endif
+BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.0

 .PHONY: build push
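The five per-arch `ifeq` blocks collapse into one parameterized debian-base reference. As a quick illustration (a sketch, not part of the Makefile), the image each supported ARCH now resolves to:

```bash
#!/usr/bin/env bash
# Sketch: the single BASEIMAGE line covers every architecture the old
# ifeq blocks enumerated, because debian-base publishes per-arch tags.
for arch in amd64 arm arm64 ppc64le s390x; do
  echo "ARCH=${arch} -> BASEIMAGE=k8s.gcr.io/debian-base-${arch}:0.4.0"
done
```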
48  vendor/k8s.io/kubernetes/cluster/addons/addon-manager/kube-addons.sh  (generated, vendored)
@@ -28,6 +28,29 @@
 KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
 KUBECTL_OPTS=${KUBECTL_OPTS:-}
+# KUBECTL_PRUNE_WHITELIST is a list of resources whitelisted by default.
+# This is currently the same as the default list in:
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go
+KUBECTL_PRUNE_WHITELIST=(
+  core/v1/ConfigMap
+  core/v1/Endpoints
+  core/v1/Namespace
+  core/v1/PersistentVolumeClaim
+  core/v1/PersistentVolume
+  core/v1/Pod
+  core/v1/ReplicationController
+  core/v1/Secret
+  core/v1/Service
+  batch/v1/Job
+  batch/v1beta1/CronJob
+  extensions/v1beta1/DaemonSet
+  extensions/v1beta1/Deployment
+  extensions/v1beta1/Ingress
+  extensions/v1beta1/ReplicaSet
+  apps/v1beta1/StatefulSet
+  apps/v1beta1/Deployment
+)

 ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
 ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
@@ -82,6 +105,25 @@ function log() {
   esac
 }

+# Generate kubectl prune-whitelist flags from provided resource list.
+function generate_prune_whitelist_flags() {
+  local -r resources=($@)
+  for resource in "${resources[@]}"; do
+    printf "%s" "--prune-whitelist ${resource} "
+  done
+}
+
+# KUBECTL_EXTRA_PRUNE_WHITELIST is a list of extra whitelisted resources
+# besides the default ones.
+extra_prune_whitelist=
+if [ -n "${KUBECTL_EXTRA_PRUNE_WHITELIST:-}" ]; then
+  extra_prune_whitelist=( ${KUBECTL_EXTRA_PRUNE_WHITELIST:-} )
+fi
+prune_whitelist=( ${KUBECTL_PRUNE_WHITELIST[@]} ${extra_prune_whitelist[@]} )
+prune_whitelist_flags=$(generate_prune_whitelist_flags ${prune_whitelist[@]})
+
+log INFO "== Generated kubectl prune whitelist flags: $prune_whitelist_flags =="
+
 # $1 filename of addon to start.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
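The whitelist is extensible at runtime: anything in KUBECTL_EXTRA_PRUNE_WHITELIST is appended to the defaults before the flags are rendered. A minimal sketch of the expansion, using a hypothetical extra resource:

```bash
#!/usr/bin/env bash
# Sketch: reproduce the kube-addons.sh flag rendering outside the manager.
generate_prune_whitelist_flags() {
  local -r resources=($@)
  for resource in "${resources[@]}"; do
    printf "%s" "--prune-whitelist ${resource} "
  done
}

KUBECTL_EXTRA_PRUNE_WHITELIST="rbac.authorization.k8s.io/v1/ClusterRole"  # hypothetical value
generate_prune_whitelist_flags core/v1/ConfigMap ${KUBECTL_EXTRA_PRUNE_WHITELIST}
# prints: --prune-whitelist core/v1/ConfigMap --prune-whitelist rbac.authorization.k8s.io/v1/ClusterRole
```

These flags are exactly what `kubectl apply --prune=true` receives in the reconcile loop below.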
@@ -126,12 +168,12 @@ function reconcile_addons() {
   log INFO "== Reconciling with deprecated label =="
   ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
     -l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
-    --prune=true --recursive | grep -v configured
+    --prune=true ${prune_whitelist_flags} --recursive | grep -v configured

   log INFO "== Reconciling with addon-manager label =="
   ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
     -l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
-    --prune=true --recursive | grep -v configured
+    --prune=true ${prune_whitelist_flags} --recursive | grep -v configured

   log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
 }
@@ -166,7 +208,7 @@ function is_leader() {

 # The business logic for whether a given object should be created
 # was already enforced by salt, and /etc/kubernetes/addons is the
-# managed result is of that. Start everything below that directory.
+# managed result of that. Start everything below that directory.
 log INFO "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="

 # Create the namespace that will be used to host the cluster-level add-ons.
@@ -10,6 +10,7 @@ rules:
 - apiGroups: [""]
   resources:
   - namespaces
+  - serviceaccounts
   verbs:
   - get
   - list
@@ -17,19 +17,22 @@ spec:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
       - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
         name: autoscaler
         command:
         - /cluster-proportional-autoscaler
         - --namespace=kube-system
         - --configmap=calico-typha-horizontal-autoscaler
         - --target=deployment/calico-typha
         - --logtostderr=true
         - --v=2
         resources:
           requests:
             cpu: 10m
           limits:
             cpu: 10m
       serviceAccountName: typha-cpha
6  vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/OWNERS  (generated, vendored)
@@ -1,6 +1,8 @@
approvers:
- bowei
- nicksardo
- rramkumar1
- mrhohn
reviewers:
- bowei
- nicksardo
- rramkumar1
- mrhohn
@@ -25,7 +25,7 @@ spec:
         # Any image is permissible as long as:
         # 1. It serves a 404 page at /
         # 2. It serves 200 on a /healthz endpoint
-        image: k8s.gcr.io/defaultbackend:1.4
+        image: k8s.gcr.io/defaultbackend-amd64:1.5
         livenessProbe:
           httpGet:
             path: /healthz
@@ -36,31 +36,34 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: heapster
           livenessProbe:
             httpGet:
@@ -73,13 +76,13 @@ spec:
             - /heapster
             - --source=kubernetes.summary_api:''
             - --sink=gcm
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: eventer
           command:
             - /eventer
             - --source=kubernetes:''
            - --sink=gcl
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: heapster-nanny
           resources:
             limits:
@@ -108,11 +111,14 @@ spec:
             - --memory={{ base_metrics_memory }}
             - --extra-memory={{metrics_memory_per_node}}Mi
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=heapster
             - --poll-period=300000
             - --estimator=exponential
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+            # Specifies the smallest cluster (defined in number of nodes)
+            # resources will be scaled to.
+            - --minClusterSize={{ heapster_min_cluster_size }}
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: eventer-nanny
           resources:
             limits:
@@ -141,7 +147,7 @@ spec:
             - --memory={{base_eventer_memory}}
             - --extra-memory={{eventer_memory_per_node}}Ki
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=eventer
             - --poll-period=300000
             - --estimator=exponential
@@ -36,31 +36,34 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: heapster
           livenessProbe:
             httpGet:
@@ -74,13 +77,13 @@ spec:
             - --source=kubernetes.summary_api:''
             - --sink=influxdb:http://monitoring-influxdb:8086
             - --sink=gcm:?metrics=autoscaling
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: eventer
           command:
             - /eventer
             - --source=kubernetes:''
             - --sink=gcl
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: heapster-nanny
           resources:
             limits:
@@ -109,11 +112,14 @@ spec:
             - --memory={{ base_metrics_memory }}
             - --extra-memory={{ metrics_memory_per_node }}Mi
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=heapster
             - --poll-period=300000
             - --estimator=exponential
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+            # Specifies the smallest cluster (defined in number of nodes)
+            # resources will be scaled to.
+            - --minClusterSize={{ heapster_min_cluster_size }}
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: eventer-nanny
           resources:
             limits:
@@ -142,7 +148,7 @@ spec:
             - --memory={{ base_eventer_memory }}
             - --extra-memory={{ eventer_memory_per_node }}Ki
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=eventer
             - --poll-period=300000
             - --estimator=exponential
@@ -36,31 +36,34 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: heapster
           livenessProbe:
             httpGet:
@@ -73,13 +76,13 @@ spec:
             - /heapster
             - --source=kubernetes.summary_api:''
             - --sink=influxdb:http://monitoring-influxdb:8086
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: eventer
           command:
             - /eventer
             - --source=kubernetes:''
             - --sink=influxdb:http://monitoring-influxdb:8086
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: heapster-nanny
           resources:
             limits:
@@ -108,11 +111,14 @@ spec:
             - --memory={{ base_metrics_memory }}
             - --extra-memory={{ metrics_memory_per_node }}Mi
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=heapster
             - --poll-period=300000
             - --estimator=exponential
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+            # Specifies the smallest cluster (defined in number of nodes)
+            # resources will be scaled to.
+            - --minClusterSize={{ heapster_min_cluster_size }}
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: eventer-nanny
           resources:
             limits:
@@ -141,7 +147,7 @@ spec:
             - --memory={{ base_eventer_memory }}
             - --extra-memory={{ eventer_memory_per_node }}Ki
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=eventer
             - --poll-period=300000
             - --estimator=exponential
@@ -23,31 +23,34 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: heapster
           livenessProbe:
             httpGet:
@@ -57,12 +60,13 @@ spec:
           initialDelaySeconds: 180
           timeoutSeconds: 5
           command:
+            # On GCP, container.googleapis.com/instance_id node annotation is used to provide instance_id label for Stackdriver
             - /heapster
-            - --source=kubernetes.summary_api:''
+            - --source=kubernetes.summary_api:?host_id_annotation=container.googleapis.com/instance_id
             - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }}
       # BEGIN_PROMETHEUS_TO_SD
       - name: prom-to-sd
-        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
+        image: k8s.gcr.io/prometheus-to-sd:v0.3.1
         command:
         - /monitor
         - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
@@ -80,7 +84,7 @@ spec:
             fieldRef:
               fieldPath: metadata.namespace
       # END_PROMETHEUS_TO_SD
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: heapster-nanny
           resources:
             limits:
@@ -109,10 +113,13 @@ spec:
             - --memory={{ base_metrics_memory }}
             - --extra-memory={{metrics_memory_per_node}}Mi
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=heapster
             - --poll-period=300000
             - --estimator=exponential
+            # Specifies the smallest cluster (defined in number of nodes)
+            # resources will be scaled to.
+            - --minClusterSize={{ heapster_min_cluster_size }}
       volumes:
         - name: heapster-config-volume
           configMap:
@@ -23,31 +23,34 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
-        - image: k8s.gcr.io/heapster-amd64:v1.5.3
+        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
           name: heapster
           livenessProbe:
             httpGet:
@@ -59,7 +62,7 @@ spec:
           command:
             - /heapster
             - --source=kubernetes.summary_api:''
-        - image: k8s.gcr.io/addon-resizer:1.8.1
+        - image: k8s.gcr.io/addon-resizer:1.8.4
           name: heapster-nanny
           resources:
             limits:
@@ -88,10 +91,13 @@ spec:
             - --memory={{ base_metrics_memory }}
             - --extra-memory={{ metrics_memory_per_node }}Mi
             - --threshold=5
-            - --deployment=heapster-v1.5.3
+            - --deployment=heapster-v1.6.0-beta.1
             - --container=heapster
             - --poll-period=300000
             - --estimator=exponential
+            # Specifies the smallest cluster (defined in number of nodes)
+            # resources will be scaled to.
+            - --minClusterSize={{ heapster_min_cluster_size }}
       volumes:
         - name: heapster-config-volume
           configMap:
10  vendor/k8s.io/kubernetes/cluster/addons/dashboard/OWNERS  (generated, vendored)
@@ -1,6 +1,12 @@
approvers:
- floreks
- maciaszczykm
- bryk
reviewers:
- cheld
- cupofcat
- danielromlein
- floreks
- ianlewis
- konryd
- maciaszczykm
- mhenc
- rf232
2  vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-controller.yaml  (generated, vendored)
@@ -31,7 +31,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
       - name: kubernetes-dashboard
-        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
+        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0
         resources:
           limits:
             cpu: 100m
@@ -29,7 +29,7 @@ metadata:
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
-    verbs: ["list"]
+    verbs: ["list", "watch"]
  - apiGroups: [""]
    resources: ["replicationcontrollers/scale"]
    verbs: ["get", "update"]
@@ -80,9 +80,12 @@ spec:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.3.0
         resources:
           requests:
             cpu: "20m"
12  vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.base  (generated, vendored)
@@ -27,6 +27,12 @@ rules:
   verbs:
   - list
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -66,7 +72,9 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        loop
+        reload
         loadbalance
     }
 ---
 apiVersion: extensions/v1beta1
@@ -100,13 +108,11 @@ spec:
     spec:
       serviceAccountName: coredns
       tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
         - key: "CriticalAddonsOnly"
           operator: "Exists"
       containers:
       - name: coredns
-        image: k8s.gcr.io/coredns:1.1.3
+        image: k8s.gcr.io/coredns:1.2.6
         imagePullPolicy: IfNotPresent
         resources:
           limits:
12  vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.in  (generated, vendored)
@@ -27,6 +27,12 @@ rules:
   verbs:
   - list
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -66,7 +72,9 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        loop
+        reload
         loadbalance
     }
 ---
 apiVersion: extensions/v1beta1
@@ -100,13 +108,11 @@ spec:
     spec:
       serviceAccountName: coredns
       tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
         - key: "CriticalAddonsOnly"
           operator: "Exists"
       containers:
       - name: coredns
-        image: k8s.gcr.io/coredns:1.1.3
+        image: k8s.gcr.io/coredns:1.2.6
         imagePullPolicy: IfNotPresent
         resources:
           limits:
12  vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.sed  (generated, vendored)
@@ -27,6 +27,12 @@ rules:
   verbs:
   - list
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -66,7 +72,9 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        loop
+        reload
         loadbalance
     }
 ---
 apiVersion: extensions/v1beta1
@@ -100,13 +108,11 @@ spec:
     spec:
       serviceAccountName: coredns
       tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
         - key: "CriticalAddonsOnly"
           operator: "Exists"
       containers:
       - name: coredns
-        image: k8s.gcr.io/coredns:1.1.3
+        image: k8s.gcr.io/coredns:1.2.6
         imagePullPolicy: IfNotPresent
         resources:
           limits:
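All three CoreDNS templates pick up the same Corefile and image changes together. A hedged smoke test against a running cluster (assumes kubectl access and the stock `coredns` configmap/deployment names):

```bash
#!/usr/bin/env bash
# Sketch: verify the rendered Corefile contains the new plugins and the
# deployment runs the bumped image.
kubectl -n kube-system get configmap coredns -o yaml | grep -E 'loop|reload|loadbalance'
kubectl -n kube-system get deployment coredns \
  -o jsonpath='{.spec.template.spec.containers[0].image}'  # expect k8s.gcr.io/coredns:1.2.6
```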
6  vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/README.md  (generated, vendored)
@@ -9,7 +9,7 @@ can use the DNS Service’s IP to resolve DNS names.
 ## Manually scale kube-dns Deployment

 kube-dns creates only one DNS Pod by default. If
-[dns-horizontal-autoscaler](../dns-horizontal-autoscaler/)
+[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
 is not enabled, you may need to manually scale kube-dns Deployment.

 Please use below `kubectl scale` command to scale:
@@ -18,9 +18,9 @@ kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WANT>
 ```

 Do not use `kubectl edit` to modify kube-dns Deployment object if it is
-controlled by [Addon Manager](../addon-manager/). Otherwise the modifications
+controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
 will be clobbered, in addition the replicas count for kube-dns Deployment will
-be reset to 1. See [Cluster add-ons README](../README.md) and
+be reset to 1. See [Cluster add-ons README](../../README.md) and
 [#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.

 ## kube-dns addon templates
10  vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.base  (generated, vendored)
@@ -86,6 +86,9 @@ spec:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -96,7 +99,7 @@ spec:
             optional: true
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -147,7 +150,7 @@ spec:
         - name: kube-dns-config
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -166,6 +169,7 @@ spec:
         - -k
         - --cache-size=1000
         - --no-negcache
+        - --dns-loop-detect
         - --log-facility=-
         - --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
         - --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +190,7 @@ spec:
         - name: kube-dns-config
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
         livenessProbe:
           httpGet:
             path: /metrics
10  vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.in  (generated, vendored)
@@ -86,6 +86,9 @@ spec:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -96,7 +99,7 @@ spec:
             optional: true
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -147,7 +150,7 @@ spec:
         - name: kube-dns-config
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -166,6 +169,7 @@ spec:
         - -k
         - --cache-size=1000
         - --no-negcache
+        - --dns-loop-detect
         - --log-facility=-
         - --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
         - --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +190,7 @@ spec:
         - name: kube-dns-config
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
         livenessProbe:
           httpGet:
             path: /metrics
10  vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.sed  (generated, vendored)
@@ -86,6 +86,9 @@ spec:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
+      securityContext:
+        supplementalGroups: [ 65534 ]
+        fsGroup: 65534
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -96,7 +99,7 @@ spec:
             optional: true
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -147,7 +150,7 @@ spec:
         - name: kube-dns-config
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -166,6 +169,7 @@ spec:
         - -k
         - --cache-size=1000
         - --no-negcache
+        - --dns-loop-detect
         - --log-facility=-
         - --server=/$DNS_DOMAIN/127.0.0.1#10053
         - --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +190,7 @@ spec:
         - name: kube-dns-config
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
         livenessProbe:
           httpGet:
             path: /metrics
35  vendor/k8s.io/kubernetes/cluster/addons/dns/nodelocaldns/README.md  (generated, vendored, new file)
@@ -0,0 +1,35 @@
# Nodelocal DNS Cache

This addon runs a node-local-dns pod on all cluster nodes. The pod runs CoreDNS as the DNS cache. It runs with `hostNetwork: True` and creates a dedicated dummy interface with a link-local IP (169.254.20.10/32 by default) to listen for DNS queries. The cache instances connect to clusterDNS in case of cache misses.

Design details [here](https://github.com/kubernetes/community/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md)

## nodelocaldns addon template

This directory contains the addon config yaml - `nodelocaldns.yaml`
The variables will be substituted by the configure scripts when the yaml is copied into master.

### Network policy and DNS connectivity

When running the nodelocaldns addon on clusters using network policy, additional rules might be required to enable DNS connectivity. Using a namespace selector for DNS egress traffic as shown [here](https://docs.projectcalico.org/v2.6/getting-started/kubernetes/tutorials/advanced-policy) might not be enough, since the node-local-dns pods run with `hostNetwork: True`.

One way to enable connectivity from node-local-dns pods to the clusterDNS IP is to use an ipBlock rule instead:

```
spec:
  egress:
  - ports:
    - port: 53
      protocol: TCP
    - port: 53
      protocol: UDP
    to:
    - ipBlock:
        cidr: <well-known clusterIP for DNS>/32
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
```
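A hedged way to exercise the cache from a node once the DaemonSet below is up (assumes the default 169.254.20.10 listen address, the default cluster domain, and dig/curl available on the node):

```bash
#!/usr/bin/env bash
# Sketch: query the node-local cache directly and peek at its metrics.
dig +short @169.254.20.10 kubernetes.default.svc.cluster.local
curl -s http://localhost:9253/metrics | head  # prometheus :9253 per the Corefile
```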
144  vendor/k8s.io/kubernetes/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml  (generated, vendored, new file)
@@ -0,0 +1,144 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-local-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---

apiVersion: v1
kind: ConfigMap
metadata:
  name: node-local-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    __PILLAR__DNS__DOMAIN__:53 {
        errors
        cache 30
        reload
        loop
        bind __PILLAR__LOCAL__DNS__
        forward . __PILLAR__DNS__SERVER__ {
            force_tcp
        }
        prometheus :9253
        health __PILLAR__LOCAL__DNS__:8080
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind __PILLAR__LOCAL__DNS__
        forward . __PILLAR__DNS__SERVER__ {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind __PILLAR__LOCAL__DNS__
        forward . __PILLAR__DNS__SERVER__ {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind __PILLAR__LOCAL__DNS__
        forward . /etc/resolv.conf {
            force_tcp
        }
        prometheus :9253
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-local-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: node-local-dns
  template:
    metadata:
      labels:
        k8s-app: node-local-dns
    spec:
      priorityClassName: system-node-critical
      serviceAccountName: node-local-dns
      hostNetwork: true
      dnsPolicy: Default  # Don't use cluster DNS.
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: node-cache
        image: k8s.gcr.io/k8s-dns-node-cache:1.15.0
        resources:
          limits:
            memory: 30Mi
          requests:
            cpu: 25m
            memory: 5Mi
        args: [ "-localip", "__PILLAR__LOCAL__DNS__", "-conf", "/etc/coredns/Corefile" ]
        securityContext:
          privileged: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9253
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            host: __PILLAR__LOCAL__DNS__
            path: /health
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 5
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
      volumes:
      - name: config-volume
        configMap:
          name: node-local-dns
          items:
          - key: Corefile
            path: Corefile
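The __PILLAR__* tokens above are placeholders that the configure scripts replace when the yaml is copied onto the master. An equivalent manual render is sketched below with illustrative values (your clusterDNS IP and domain will differ):

```bash
#!/usr/bin/env bash
# Sketch: substitute the pillar placeholders and apply the manifest.
sed -e 's/__PILLAR__LOCAL__DNS__/169.254.20.10/g' \
    -e 's/__PILLAR__DNS__SERVER__/10.0.0.10/g' \
    -e 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' \
    nodelocaldns.yaml | kubectl apply -f -
```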
2  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/OWNERS  (generated, vendored)
@@ -4,3 +4,5 @@ approvers:
 reviewers:
 - coffeepac
 - piosz
+labels:
+- sig/instrumentation
19  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/README.md  (generated, vendored)
@@ -19,15 +19,16 @@ a Deployment, but allows for maintaining state on storage volumes.

 ### Security

-Elasticsearch has capabilities to enable authorization using the
-[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
-in Elasticsearch and Kibana configurations. It can also be set via the
-`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
-follow [official documentation][setupCreds] to set up credentials in
-Elasticsearch and Kibana. Don't forget to propagate those credentials also to
-Fluentd in its [configuration][fluentdCreds], using for example
-[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
-and [Secrets][secret] to store credentials in the Kubernetes apiserver.
+Elasticsearch has capabilities to enable authorization using the [X-Pack
+plugin][xPack]. For the sake of simplicity this example uses the fully open
+source prebuilt images from elastic that do not contain the X-Pack plugin. If
+you need these features, please consider building the images from either the
+"basic" or "platinum" version. After enabling these features, follow [official
+documentation][setupCreds] to set up credentials in Elasticsearch and Kibana.
+Don't forget to propagate those credentials also to Fluentd in its
+[configuration][fluentdCreds], using for example [environment
+variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap] and
+[Secrets][secret] to store credentials in the Kubernetes apiserver.

 ### Initialization
10  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/BUILD  (generated, vendored)
@@ -18,11 +18,11 @@ go_library(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
2  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile  (generated, vendored)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
+FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2

 VOLUME ["/data"]
 EXPOSE 9200 9300
2  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Makefile  (generated, vendored)
@@ -16,7 +16,7 @@

 PREFIX = staging-k8s.gcr.io
 IMAGE = elasticsearch
-TAG = v5.6.4
+TAG = v6.3.0

 build:
 	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
@@ -12,6 +12,3 @@ path.data: /data
 network.host: 0.0.0.0

 discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
-
-xpack.security.enabled: false
-xpack.monitoring.enabled: false
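Dropping the xpack.* keys matches the move to the -oss base image, which ships without X-Pack entirely. A hedged check against a running node (assumes port 9200 is reachable):

```bash
#!/usr/bin/env bash
# Sketch: an -oss build should report no installed x-pack plugin.
curl -s http://localhost:9200/_cat/plugins  # expect empty output
```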
@@ -20,14 +20,15 @@ import (
 	"flag"
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 	"time"

-	"github.com/golang/glog"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientapi "k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/klog"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 )
@@ -60,22 +61,22 @@ func flattenSubsets(subsets []api.EndpointSubset) []string {
 func main() {
 	flag.Parse()

-	glog.Info("Kubernetes Elasticsearch logging discovery")
+	klog.Info("Kubernetes Elasticsearch logging discovery")

 	cc, err := buildConfigFromEnvs(os.Getenv("APISERVER_HOST"), os.Getenv("KUBE_CONFIG_FILE"))
 	if err != nil {
-		glog.Fatalf("Failed to make client: %v", err)
+		klog.Fatalf("Failed to make client: %v", err)
 	}
 	client, err := clientset.NewForConfig(cc)

 	if err != nil {
-		glog.Fatalf("Failed to make client: %v", err)
+		klog.Fatalf("Failed to make client: %v", err)
 	}
 	namespace := metav1.NamespaceSystem
 	envNamespace := os.Getenv("NAMESPACE")
 	if envNamespace != "" {
 		if _, err := client.Core().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
-			glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
+			klog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
 		}
 		namespace = envNamespace
 	}
@@ -97,32 +98,31 @@ func main() {
 	// If we did not find an elasticsearch logging service then log a warning
 	// and return without adding any unicast hosts.
 	if elasticsearch == nil {
-		glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
+		klog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
 		return
 	}

 	var endpoints *api.Endpoints
 	addrs := []string{}
 	// Wait for some endpoints.
-	count := 0
+	count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES"))
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
 		endpoints, err = client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
 		if err != nil {
 			continue
 		}
 		addrs = flattenSubsets(endpoints.Subsets)
-		glog.Infof("Found %s", addrs)
-		if len(addrs) > 0 && len(addrs) == count {
+		klog.Infof("Found %s", addrs)
+		if len(addrs) > 0 && len(addrs) >= count {
 			break
 		}
-		count = len(addrs)
 	}
 	// If there was an error finding endpoints then log a warning and quit.
 	if err != nil {
-		glog.Warningf("Error finding endpoints: %v", err)
+		klog.Warningf("Error finding endpoints: %v", err)
 		return
 	}

-	glog.Infof("Endpoints = %s", addrs)
+	klog.Infof("Endpoints = %s", addrs)
 	fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
 }
2  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/run.sh  (generated, vendored)
@@ -26,4 +26,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
 chown -R elasticsearch:elasticsearch /data

 ./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
-exec su elasticsearch -c ./bin/es-docker
+exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh
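run.sh already exports MINIMUM_MASTER_NODES (default 2), and the discovery binary now reads it via strconv.Atoi instead of guessing from previous loop iterations. The relevant startup sequence, condensed from the two files above:

```bash
#!/usr/bin/env bash
# Sketch: how run.sh and the discovery binary now cooperate.
export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
# The discovery binary waits until at least MINIMUM_MASTER_NODES endpoint
# addresses exist, then prints the unicast hosts line appended here:
./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh
```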
8  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml  (generated, vendored)
@@ -54,7 +54,7 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
-    version: v5.6.4
+    version: v6.3.0
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
@@ -63,17 +63,17 @@ spec:
   selector:
     matchLabels:
       k8s-app: elasticsearch-logging
-      version: v5.6.4
+      version: v6.3.0
   template:
     metadata:
       labels:
         k8s-app: elasticsearch-logging
-        version: v5.6.4
+        version: v6.3.0
         kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: elasticsearch-logging
       containers:
-      - image: k8s.gcr.io/elasticsearch:v5.6.4
+      - image: k8s.gcr.io/elasticsearch:v6.3.0
        name: elasticsearch-logging
        resources:
          # need more cpu upon initialization, therefore burstable class
44  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml  (generated, vendored)
@@ -1,7 +1,7 @@
 kind: ConfigMap
 apiVersion: v1
 metadata:
-  name: fluentd-es-config-v0.1.4
+  name: fluentd-es-config-v0.1.6
   namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -115,7 +115,6 @@ data:
       @type tail
       path /var/log/containers/*.log
       pos_file /var/log/es-containers.log.pos
-      time_format %Y-%m-%dT%H:%M:%S.%NZ
      tag raw.kubernetes.*
      read_from_head true
      <parse>
@@ -273,21 +272,6 @@ data:
       tag kube-scheduler
     </source>

-    # Example:
-    # I1104 10:36:20.242766    5 rescheduler.go:73] Running Rescheduler
-    <source>
-      @id rescheduler.log
-      @type tail
-      format multiline
-      multiline_flush_interval 5s
-      format_firstline /^\w\d{4}/
-      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
-      time_format %m%d %H:%M:%S.%N
-      path /var/log/rescheduler.log
-      pos_file /var/log/es-rescheduler.log.pos
-      tag rescheduler
-    </source>
-
     # Example:
     # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
     <source>
@@ -323,10 +307,11 @@ data:
     <source>
       @id journald-docker
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
+      matches [{ "_SYSTEMD_UNIT": "docker.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-docker.pos
      </storage>
      read_from_head true
      tag docker
@@ -335,10 +320,11 @@ data:
     <source>
       @id journald-container-runtime
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
+      matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-container-runtime.pos
      </storage>
      read_from_head true
      tag container-runtime
@@ -347,10 +333,11 @@ data:
     <source>
       @id journald-kubelet
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+      matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kubelet.pos
      </storage>
      read_from_head true
      tag kubelet
@@ -359,22 +346,24 @@ data:
     <source>
       @id journald-node-problem-detector
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+      matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-node-problem-detector.pos
      </storage>
      read_from_head true
      tag node-problem-detector
    </source>


    <source>
      @id kernel
      @type systemd
-      filters [{ "_TRANSPORT": "kernel" }]
+      matches [{ "_TRANSPORT": "kernel" }]
      <storage>
        @type local
        persistent true
        path /var/log/kernel.pos
      </storage>
      <entry>
        fields_strip_underscores true
@@ -431,10 +420,19 @@ data:
       @type kubernetes_metadata
     </filter>

+    # Concatenate multi-line logs
+    <filter **>
+      @type concat
+      key message
+      multiline_end_regexp /\n$/
+      separator ""
+    </filter>
+
     <match **>
       @id elasticsearch
       @type elasticsearch
       @log_level info
       type_name fluentd
       include_tag_key true
       host elasticsearch-logging
       port 9200
12  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml  (generated, vendored)
@@ -48,24 +48,24 @@ roleRef:
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: fluentd-es-v2.0.4
+  name: fluentd-es-v2.2.1
   namespace: kube-system
   labels:
     k8s-app: fluentd-es
-    version: v2.0.4
+    version: v2.2.1
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     matchLabels:
       k8s-app: fluentd-es
-      version: v2.0.4
+      version: v2.2.1
   template:
     metadata:
       labels:
         k8s-app: fluentd-es
         kubernetes.io/cluster-service: "true"
-        version: v2.0.4
+        version: v2.2.1
       # This annotation ensures that fluentd does not get evicted if the node
       # supports critical pod annotation based priority scheme.
       # Note that this does not guarantee admission on the nodes (#40573).
@@ -77,7 +77,7 @@ spec:
       serviceAccountName: fluentd-es
       containers:
       - name: fluentd-es
-        image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
+        image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
         env:
         - name: FLUENTD_ARGS
           value: --no-supervisor -q
@@ -107,4 +107,4 @@ spec:
           path: /var/lib/docker/containers
       - name: config-volume
         configMap:
-          name: fluentd-es-config-v0.1.4
+          name: fluentd-es-config-v0.1.6
@@ -55,4 +55,4 @@ EXPOSE 80
 ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

 # Start Fluentd to pick up our config that watches Docker container logs.
-CMD /run.sh $FLUENTD_ARGS
+CMD ["/run.sh"]
17  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile  (generated, vendored)
@@ -1,11 +1,12 @@
 source 'https://rubygems.org'

-gem 'fluentd', '<=1.1.0'
-gem 'activesupport', '~>5.1.4'
-gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
-gem 'fluent-plugin-elasticsearch', '~>2.4.1'
-gem 'fluent-plugin-systemd', '~>0.3.1'
-gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
-gem 'fluent-plugin-prometheus', '~>0.3.0'
+gem 'fluentd', '<=1.2.4'
+gem 'activesupport', '~>5.2.1'
+gem 'fluent-plugin-concat', '~>2.3.0'
+gem 'fluent-plugin-detect-exceptions', '~>0.0.11'
+gem 'fluent-plugin-elasticsearch', '~>2.11.5'
+gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.0.0'
 gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
-gem 'oj', '~>3.3.1.0'
+gem 'fluent-plugin-prometheus', '~>1.0.1'
+gem 'fluent-plugin-systemd', '~>1.0.1'
+gem 'oj', '~>3.6.5'
@@ -16,7 +16,7 @@

 PREFIX = staging-k8s.gcr.io
 IMAGE = fluentd-elasticsearch
-TAG = v2.0.4
+TAG = v2.3.1

 build:
 	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
@@ -20,4 +20,4 @@
 # For systems without journald
 mkdir -p /var/log/journal

-exec /usr/local/bin/fluentd $@
+exec /usr/local/bin/fluentd $FLUENTD_ARGS
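With the Dockerfile CMD switched to a bare ["/run.sh"], the arguments travel through the FLUENTD_ARGS environment variable instead of $@; the DaemonSet above already sets it. A sketch of the equivalent local invocation:

```bash
#!/usr/bin/env bash
# Sketch: run the image the way the DaemonSet does.
docker run --rm -e FLUENTD_ARGS='--no-supervisor -q' \
  staging-k8s.gcr.io/fluentd-elasticsearch:v2.3.1
```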
6  vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml  (generated, vendored)
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: docker.elastic.co/kibana/kibana:5.6.4
+        image: docker.elastic.co/kibana/kibana-oss:6.3.2
         resources:
           # need more cpu upon initialization, therefore burstable class
           limits:
@@ -33,10 +33,6 @@ spec:
           value: http://elasticsearch-logging:9200
         - name: SERVER_BASEPATH
           value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
-        - name: XPACK_MONITORING_ENABLED
-          value: "false"
-        - name: XPACK_SECURITY_ENABLED
-          value: "false"
         ports:
         - containerPort: 5601
           name: ui
2  vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/OWNERS  (generated, vendored)
@@ -4,3 +4,5 @@ approvers:
 reviewers:
 - piosz
 - x13n
+labels:
+- sig/gcp
10  vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/event-exporter.yaml  (generated, vendored)
@@ -29,11 +29,11 @@ subjects:
 apiVersion: apps/v1beta1
 kind: Deployment
 metadata:
-  name: event-exporter-v0.2.1
+  name: event-exporter-v0.2.3
   namespace: kube-system
   labels:
     k8s-app: event-exporter
-    version: v0.2.1
+    version: v0.2.3
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
@@ -42,18 +42,18 @@ spec:
     metadata:
       labels:
         k8s-app: event-exporter
-        version: v0.2.1
+        version: v0.2.3
     spec:
       serviceAccountName: event-exporter-sa
       containers:
       - name: event-exporter
-        image: k8s.gcr.io/event-exporter:v0.2.1
+        image: k8s.gcr.io/event-exporter:v0.2.3
         command:
         - /event-exporter
         - -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }}
       # BEGIN_PROMETHEUS_TO_SD
       - name: prometheus-to-sd-exporter
-        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
+        image: k8s.gcr.io/prometheus-to-sd:v0.3.1
         command:
         - /monitor
         - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
40 vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml generated vendored
@@ -61,16 +61,18 @@ data:
       # reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
       tag reform.*
       read_from_head true
-      format multi_format
-      <pattern>
-        format json
-        time_key time
-        time_format %Y-%m-%dT%H:%M:%S.%NZ
-      </pattern>
-      <pattern>
-        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
-        time_format %Y-%m-%dT%H:%M:%S.%N%:z
-      </pattern>
+      <parse>
+        @type multi_format
+        <pattern>
+          format json
+          time_key time
+          time_format %Y-%m-%dT%H:%M:%S.%NZ
+        </pattern>
+        <pattern>
+          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+          time_format %Y-%m-%dT%H:%M:%S.%N%:z
+        </pattern>
+      </parse>
     </source>

     <filter reform.**>
@@ -210,20 +212,6 @@ data:
       tag kube-scheduler
     </source>

-    # Example:
-    # I1104 10:36:20.242766    5 rescheduler.go:73] Running Rescheduler
-    <source>
-      @type tail
-      format multiline
-      multiline_flush_interval 5s
-      format_firstline /^\w\d{4}/
-      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
-      time_format %m%d %H:%M:%S.%N
-      path /var/log/rescheduler.log
-      pos_file /var/log/gcp-rescheduler.log.pos
-      tag rescheduler
-    </source>
-
     # Example:
     # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
     <source>
@@ -265,7 +253,7 @@ data:

     <source>
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
+      filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
       pos_file /var/log/gcp-journald-container-runtime.pos
       read_from_head true
       tag container-runtime
@@ -301,7 +289,7 @@ data:
       @type grep
       <exclude>
         key _SYSTEMD_UNIT
-        pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
+        pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$
       </exclude>
     </filter>
     # END_NODE_JOURNAL
68 vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml generated vendored
@@ -64,16 +64,18 @@ data:
       # reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
       tag reform.*
       read_from_head true
-      format multi_format
-      <pattern>
-        format json
-        time_key time
-        time_format %Y-%m-%dT%H:%M:%S.%NZ
-      </pattern>
-      <pattern>
-        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
-        time_format %Y-%m-%dT%H:%M:%S.%N%:z
-      </pattern>
+      <parse>
+        @type multi_format
+        <pattern>
+          format json
+          time_key time
+          time_format %Y-%m-%dT%H:%M:%S.%NZ
+        </pattern>
+        <pattern>
+          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+          time_format %Y-%m-%dT%H:%M:%S.%N%:z
+        </pattern>
+      </parse>
     </source>

     <filter reform.**>
@@ -98,6 +100,8 @@ data:
         # instead of jsonPayload after extracting 'time', 'severity' and
         # 'stream' from the record.
         message ${record['log']}
+        # If 'severity' is not set, assume stderr is ERROR and stdout is INFO.
+        severity ${record['severity'] || if record['stream'] == 'stderr' then 'ERROR' else 'INFO' end}
       </record>
       tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
       remove_keys stream,log
@@ -108,8 +112,8 @@ data:
       @type detect_exceptions

       remove_tag_prefix raw
-      message log
-      stream stream
+      message message
+      stream "logging.googleapis.com/local_resource_id"
       multiline_flush_interval 5
       max_bytes 500000
       max_lines 1000
@@ -223,20 +227,6 @@ data:
       tag kube-scheduler
     </source>

-    # Example:
-    # I1104 10:36:20.242766    5 rescheduler.go:73] Running Rescheduler
-    <source>
-      @type tail
-      format multiline
-      multiline_flush_interval 5s
-      format_firstline /^\w\d{4}/
-      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
-      time_format %m%d %H:%M:%S.%N
-      path /var/log/rescheduler.log
-      pos_file /var/log/gcp-rescheduler.log.pos
-      tag rescheduler
-    </source>
-
     # Example:
     # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
     <source>
@@ -278,7 +268,7 @@ data:

     <source>
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
+      filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
       pos_file /var/log/gcp-journald-container-runtime.pos
       read_from_head true
       tag container-runtime
@@ -314,7 +304,7 @@ data:
       @type grep
       <exclude>
         key _SYSTEMD_UNIT
-        pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
+        pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$
       </exclude>
     </filter>
     # END_NODE_JOURNAL
@@ -386,6 +376,12 @@ data:
       @type null
     </match>

+    # Add a unique insertId to each log entry that doesn't already have it.
+    # This helps guarantee the order and prevent log duplication.
+    <filter **>
+      @type add_insert_ids
+    </filter>
+
     # This section is exclusive for k8s_container logs. These logs come with
     # 'stderr'/'stdout' tags.
     # We use a separate output stanza for 'k8s_node' logs with a smaller buffer
@@ -408,9 +404,9 @@ data:
       buffer_queue_full_action block
       # Set the chunk limit conservatively to avoid exceeding the recommended
       # chunk size of 5MB per write request.
-      buffer_chunk_limit 1M
+      buffer_chunk_limit 512k
       # Cap the combined memory usage of this buffer and the one below to
-      # 1MiB/chunk * (6 + 2) chunks = 8 MiB
+      # 512KiB/chunk * (6 + 2) chunks = 4 MiB
       buffer_queue_limit 6
       # Never wait more than 5 seconds before flushing logs in the non-error case.
       flush_interval 5s
@@ -421,8 +417,9 @@ data:
       # Use multiple threads for processing.
       num_threads 2
+      use_grpc true
       # Use Metadata Agent to get monitored resource.
       enable_metadata_agent true
       # Skip timestamp adjustment as this is in a controlled environment with
       # known timestamp format. This helps with CPU usage.
       adjust_invalid_timestamps false
     </match>

     # Attach local_resource_id for 'k8s_node' monitored resource.
@@ -450,15 +447,16 @@ data:
       buffer_type file
       buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
       buffer_queue_full_action block
-      buffer_chunk_limit 1M
+      buffer_chunk_limit 512k
       buffer_queue_limit 2
       flush_interval 5s
       max_retry_wait 30
       disable_retry_limit
       num_threads 2
+      use_grpc true
       # Use Metadata Agent to get monitored resource.
       enable_metadata_agent true
       # Skip timestamp adjustment as this is in a controlled environment with
       # known timestamp format. This helps with CPU usage.
       adjust_invalid_timestamps false
     </match>
 metadata:
   name: fluentd-gcp-config-v1.2.5
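Both ConfigMaps make the same structural change: fluentd v1 deprecates inline `format`/`<pattern>` directives directly under `<source>` in favor of an explicit `<parse>` stanza. A minimal, self-contained sketch of the new shape, embedded in a ConfigMap the way these addons embed their pipeline (the ConfigMap name and file paths here are illustrative, not taken from the diff):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: fluentd-parse-example   # illustrative name
      namespace: kube-system
    data:
      containers.input.conf: |-
        <source>
          @type tail
          path /var/log/containers/*.log          # illustrative path
          pos_file /var/log/containers.log.pos    # illustrative path
          tag reform.*
          read_from_head true
          <parse>
            @type multi_format
            <pattern>
              format json
              time_key time
              time_format %Y-%m-%dT%H:%M:%S.%NZ
            </pattern>
            <pattern>
              format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
              time_format %Y-%m-%dT%H:%M:%S.%N%:z
            </pattern>
          </parse>
        </source>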
13 vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml generated vendored
@@ -1,13 +1,13 @@
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
-  name: fluentd-gcp-v3.0.0
+  name: fluentd-gcp-{{ fluentd_gcp_yaml_version }}
   namespace: kube-system
   labels:
     k8s-app: fluentd-gcp
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v3.0.0
+    version: {{ fluentd_gcp_yaml_version }}
 spec:
   updateStrategy:
     type: RollingUpdate
@@ -16,7 +16,7 @@ spec:
       labels:
         k8s-app: fluentd-gcp
         kubernetes.io/cluster-service: "true"
-        version: v3.0.0
+        version: {{ fluentd_gcp_yaml_version }}
       # This annotation ensures that fluentd does not get evicted if the node
       # supports critical pod annotation based priority scheme.
       # Note that this does not guarantee admission on the nodes (#40573).
@@ -26,6 +26,7 @@ spec:
       priorityClassName: system-node-critical
       serviceAccountName: fluentd-gcp
       dnsPolicy: Default
+      hostNetwork: true
       containers:
       - name: fluentd-gcp
         image: gcr.io/stackdriver-agents/stackdriver-logging-agent:{{ fluentd_gcp_version }}
@@ -79,7 +80,7 @@ spec:
           fi;
       # BEGIN_PROMETHEUS_TO_SD
       - name: prometheus-to-sd-exporter
-        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
+        image: k8s.gcr.io/prometheus-to-sd:v0.3.1
         command:
         - /monitor
         - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
@@ -99,14 +100,12 @@ spec:
       # END_PROMETHEUS_TO_SD
       nodeSelector:
         beta.kubernetes.io/fluentd-ds-ready: "true"
-      terminationGracePeriodSeconds: 60
       tolerations:
-      - key: "node.alpha.kubernetes.io/ismaster"
-        effect: "NoSchedule"
       - operator: "Exists"
         effect: "NoExecute"
       - operator: "Exists"
         effect: "NoSchedule"
+      terminationGracePeriodSeconds: 30
       volumes:
       - name: varlog
         hostPath:
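The reworked tolerations rely on the empty-key form: a toleration with `operator: Exists` and no `key` matches every taint that has the given effect, the usual pattern for node agents that must run on every node, tainted or not. The same idea in isolation:

    # Pod spec fragment tolerating all NoExecute and NoSchedule taints;
    # omitting `effect` as well would tolerate every taint unconditionally.
    tolerations:
    - operator: "Exists"
      effect: "NoExecute"
    - operator: "Exists"
      effect: "NoSchedule"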
@@ -19,6 +19,7 @@ spec:
   volumes:
   - 'hostPath'
   - 'secret'
+  - 'projected'
   # TODO: This only needs a hostPath to read /etc/ssl/certs,
   # but it should be able to just include these in the image.
   allowedHostPaths:

@@ -20,6 +20,7 @@ spec:
   - 'configMap'
   - 'hostPath'
   - 'secret'
+  - 'projected'
   allowedHostPaths:
   - pathPrefix: /var/log
   - pathPrefix: /var/lib/docker/containers
6 vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/scaler-deployment.yaml generated vendored
@@ -5,7 +5,7 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: fluentd-gcp-scaler
-    version: v0.3.0
+    version: v0.5.0
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
@@ -19,10 +19,10 @@ spec:
       serviceAccountName: fluentd-gcp-scaler
       containers:
       - name: fluentd-gcp-scaler
-        image: k8s.gcr.io/fluentd-gcp-scaler:0.3
+        image: k8s.gcr.io/fluentd-gcp-scaler:0.5
         command:
         - /scaler.sh
-        - --ds-name=fluentd-gcp-v3.0.0
+        - --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }}
         - --scaling-policy=fluentd-gcp-scaling-policy
         env:
         # Defaults, used if no overrides are found in fluentd-gcp-scaling-policy
8 vendor/k8s.io/kubernetes/cluster/addons/ip-masq-agent/ip-masq-agent.yaml generated vendored
@@ -29,7 +29,9 @@ spec:
       hostNetwork: true
       containers:
       - name: ip-masq-agent
-        image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2
+        image: k8s.gcr.io/ip-masq-agent-amd64:v2.1.1
+        args:
+        - --masq-chain=IP-MASQ
         resources:
           requests:
             cpu: 10m
@@ -52,5 +54,9 @@ spec:
           - key: config
             path: ip-masq-agent
       tolerations:
       - effect: NoSchedule
         operator: Exists
+      - effect: NoExecute
+        operator: Exists
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
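For context, the agent reads its configuration from the `config` key projected above. A sketch of the ConfigMap it consumes, assuming the documented ip-masq-agent keys (the CIDR and interval values are illustrative):

    # Illustrative ConfigMap for ip-masq-agent; traffic to the listed
    # CIDRs is left un-masqueraded. Values here are examples only.
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: ip-masq-agent
      namespace: kube-system
    data:
      config: |
        nonMasqueradeCIDRs:
        - 10.0.0.0/8
        masqLinkLocal: false
        resyncInterval: 60s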
3945 vendor/k8s.io/kubernetes/cluster/addons/istio/auth/istio-auth.yaml generated vendored
File diff suppressed because it is too large

3932 vendor/k8s.io/kubernetes/cluster/addons/istio/noauth/istio.yaml generated vendored
File diff suppressed because it is too large
6 vendor/k8s.io/kubernetes/cluster/addons/kube-proxy/OWNERS generated vendored
@@ -1,12 +1,10 @@
 approvers:
 - bowei
 - dnardo
 - freehan
-- nicksardo
 - mrhohn
 - jingax10
 reviewers:
 - bowei
 - dnardo
 - freehan
-- nicksardo
 - mrhohn
 - jingax10
2 vendor/k8s.io/kubernetes/cluster/addons/metadata-agent/OWNERS generated vendored
@@ -1,6 +1,8 @@
 approvers:
 - kawych
 - piosz
+- x13n
 reviewers:
 - kawych
 - piosz
+- x13n
@@ -7,9 +7,7 @@ metadata:
   addonmanager.kubernetes.io/mode: Reconcile
 rules:
 - apiGroups:
-  - ""
-  - "apps"
-  - "extensions"
+  - "*"
   resources:
   - "*"
   verbs:
85 vendor/k8s.io/kubernetes/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml generated vendored
@@ -7,22 +7,6 @@ metadata:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 ---
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: metadata-agent-config
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-data:
-  node_level.conf: |-
-    KubernetesUseWatch: true
-    KubernetesClusterLevelMetadata: false
-  cluster_level.conf: |-
-    KubernetesUseWatch: true
-    KubernetesClusterLevelMetadata: true
----
 kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
@@ -44,28 +28,24 @@ spec:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       serviceAccountName: metadata-agent
+      priorityClassName: system-node-critical
       containers:
-      - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
+      - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
         imagePullPolicy: IfNotPresent
         name: metadata-agent
         livenessProbe:
-          exec:
-            command:
-            - /bin/bash
-            - -c
-            - |
-              if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
-                exit 1;
-              fi
-          periodSeconds: 10
+          httpGet:
+            path: /healthz
+            port: 8000
+          initialDelaySeconds: 30
+          periodSeconds: 60
+          timeoutSeconds: 5
+          failureThreshold: 1
+          successThreshold: 1
-        volumeMounts:
-        - name: metadata-agent-config-volume
-          mountPath: /etc/config
         command:
         - /opt/stackdriver/metadata/sbin/metadatad
-        - --config-file=/etc/config/node_level.conf
+        args:
+        - -o KubernetesUseWatch=true
+        - -o KubernetesClusterLevelMetadata=false
+        - -o MetadataReporterPurgeDeleted=true
         ports:
         - containerPort: 8000
           hostPort: 8799
@@ -78,10 +58,11 @@ spec:
       restartPolicy: Always
       schedulerName: default-scheduler
       terminationGracePeriodSeconds: 30
-      volumes:
-      - name: metadata-agent-config-volume
-        configMap:
-          name: metadata-agent-config
+      tolerations:
+      - operator: "Exists"
+        effect: "NoExecute"
+      - operator: "Exists"
+        effect: "NoSchedule"
       updateStrategy:
         rollingUpdate:
           maxUnavailable: 1
@@ -109,28 +90,24 @@ spec:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       serviceAccountName: metadata-agent
+      priorityClassName: system-cluster-critical
       containers:
-      - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
+      - image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
         imagePullPolicy: IfNotPresent
         name: metadata-agent
         livenessProbe:
-          exec:
-            command:
-            - /bin/bash
-            - -c
-            - |
-              if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
-                exit 1;
-              fi
-          periodSeconds: 10
+          httpGet:
+            path: /healthz
+            port: 8000
+          initialDelaySeconds: 30
+          periodSeconds: 60
+          timeoutSeconds: 5
+          failureThreshold: 1
+          successThreshold: 1
-        volumeMounts:
-        - name: metadata-agent-config-volume
-          mountPath: /etc/config
         command:
         - /opt/stackdriver/metadata/sbin/metadatad
-        - --config-file=/etc/config/cluster_level.conf
+        args:
+        - -o KubernetesUseWatch=true
+        - -o KubernetesClusterLevelMetadata=true
+        - -o MetadataReporterPurgeDeleted=true
         ports:
         - containerPort: 8000
           protocol: TCP
@@ -142,10 +119,6 @@ spec:
       restartPolicy: Always
       schedulerName: default-scheduler
       terminationGracePeriodSeconds: 30
-      volumes:
-      - name: metadata-agent-config-volume
-        configMap:
-          name: metadata-agent-config
       strategy:
         rollingUpdate:
           maxUnavailable: 1
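The probe change swaps a bash file-check for a real HTTP health endpoint. The fields used above, annotated for reference (values exactly as in the manifest):

    livenessProbe:
      httpGet:
        path: /healthz           # agent's HTTP health endpoint
        port: 8000
      initialDelaySeconds: 30    # grace period before the first probe
      periodSeconds: 60          # probe once per minute
      timeoutSeconds: 5          # each probe must respond within 5s
      failureThreshold: 1        # a single failure restarts the container
      successThreshold: 1        # one success marks the container healthy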
16 vendor/k8s.io/kubernetes/cluster/addons/metadata-agent/stackdriver/podsecuritypolicies/metadata-agent-psp-binding.yaml generated vendored (new file)
@@ -0,0 +1,16 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: gce:podsecuritypolicy:metadata-agent
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/cluster-service: "true"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: gce:podsecuritypolicy:privileged
+subjects:
+- kind: ServiceAccount
+  name: metadata-agent
+  namespace: kube-system
4 vendor/k8s.io/kubernetes/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml generated vendored
@@ -44,7 +44,7 @@ spec:
         effect: "NoSchedule"
       containers:
       - name: metadata-proxy
-        image: k8s.gcr.io/metadata-proxy:v0.1.9
+        image: k8s.gcr.io/metadata-proxy:v0.1.10
         securityContext:
           privileged: true
         # Request and limit resources to get guaranteed QoS.
@@ -57,7 +57,7 @@ spec:
             cpu: "30m"
       # BEGIN_PROMETHEUS_TO_SD
       - name: prometheus-to-sd-exporter
-        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
+        image: k8s.gcr.io/prometheus-to-sd:v0.3.1
         # Request and limit resources to get guaranteed QoS.
         resources:
           requests:
29 vendor/k8s.io/kubernetes/cluster/addons/metrics-server/metrics-server-deployment.yaml generated vendored
@@ -23,24 +23,24 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: metrics-server-v0.2.1
+  name: metrics-server-v0.3.1
   namespace: kube-system
   labels:
     k8s-app: metrics-server
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v0.2.1
+    version: v0.3.1
 spec:
   selector:
     matchLabels:
       k8s-app: metrics-server
-      version: v0.2.1
+      version: v0.3.1
   template:
     metadata:
       name: metrics-server
       labels:
         k8s-app: metrics-server
-        version: v0.2.1
+        version: v0.3.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
@@ -49,16 +49,20 @@ spec:
       serviceAccountName: metrics-server
       containers:
       - name: metrics-server
-        image: k8s.gcr.io/metrics-server-amd64:v0.2.1
+        image: k8s.gcr.io/metrics-server-amd64:v0.3.1
         command:
         - /metrics-server
-        - --source=kubernetes.summary_api:''
+        - --metric-resolution=30s
+        # These are needed for GKE, which doesn't support secure communication yet.
+        # Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
+        - --kubelet-port=10255
+        - --deprecated-kubelet-completely-insecure=true
         ports:
         - containerPort: 443
           name: https
           protocol: TCP
       - name: metrics-server-nanny
-        image: k8s.gcr.io/addon-resizer:1.8.1
+        image: k8s.gcr.io/addon-resizer:1.8.4
         resources:
           limits:
             cpu: 100m
@@ -81,15 +85,18 @@ spec:
         command:
           - /pod_nanny
           - --config-dir=/etc/config
-          - --cpu=40m
+          - --cpu={{ base_metrics_server_cpu }}
           - --extra-cpu=0.5m
-          - --memory=40Mi
-          - --extra-memory=4Mi
+          - --memory={{ base_metrics_server_memory }}
+          - --extra-memory={{ metrics_server_memory_per_node }}Mi
           - --threshold=5
-          - --deployment=metrics-server-v0.2.1
+          - --deployment=metrics-server-v0.3.1
          - --container=metrics-server
          - --poll-period=300000
          - --estimator=exponential
+          # Specifies the smallest cluster (defined in number of nodes)
+          # resources will be scaled to.
+          - --minClusterSize={{ metrics_server_min_cluster_size }}
       volumes:
         - name: metrics-server-config-volume
           configMap:
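The nanny (addon-resizer) sizes metrics-server linearly with cluster size: the requested resources are a base amount plus a per-node increment. With the old literal values, a 100-node cluster would get roughly 40m + 0.5m x 100 = 90m CPU and 40Mi + 4Mi x 100 = 440Mi memory; the templated variables above replace those literals. A sketch with the old values, annotated (the threshold semantics are my reading of addon-resizer, not stated in this diff):

    command:
    - /pod_nanny
    - --cpu=40m           # base CPU request
    - --extra-cpu=0.5m    # CPU added per node
    - --memory=40Mi       # base memory request
    - --extra-memory=4Mi  # memory added per node
    - --threshold=5       # percent deviation tolerated before resizing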
26 vendor/k8s.io/kubernetes/cluster/addons/prometheus/kube-state-metrics-deployment.yaml generated vendored
@@ -39,7 +39,7 @@ spec:
           initialDelaySeconds: 5
           timeoutSeconds: 5
       - name: addon-resizer
-        image: k8s.gcr.io/addon-resizer:1.7
+        image: k8s.gcr.io/addon-resizer:1.8.4
         resources:
           limits:
             cpu: 100m
@@ -56,8 +56,12 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/config
         command:
           - /pod_nanny
+          - --config-dir=/etc/config
           - --container=kube-state-metrics
           - --cpu=100m
           - --extra-cpu=1m
@@ -65,3 +69,23 @@ spec:
           - --extra-memory=2Mi
           - --threshold=5
           - --deployment=kube-state-metrics
+      volumes:
+      - name: config-volume
+        configMap:
+          name: kube-state-metrics-config
+---
+# Config map for resource configuration.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-state-metrics-config
+  namespace: kube-system
+  labels:
+    k8s-app: kube-state-metrics
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  NannyConfiguration: |-
+    apiVersion: nannyconfig/v1alpha1
+    kind: NannyConfiguration
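The new ConfigMap ships an empty NannyConfiguration, so operators can override the nanny's baselines there instead of editing the Deployment. A hypothetical override, assuming `baseCPU`/`baseMemory` are valid `nannyconfig/v1alpha1` fields (the field names are an assumption, not shown in this diff):

    # Hypothetical override; baseCPU/baseMemory are assumed
    # nannyconfig/v1alpha1 fields, not part of this change.
    data:
      NannyConfiguration: |-
        apiVersion: nannyconfig/v1alpha1
        kind: NannyConfiguration
        baseCPU: 100m
        baseMemory: 150Mi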
17 vendor/k8s.io/kubernetes/cluster/addons/python-image/Dockerfile generated vendored (deleted)
@@ -1,17 +0,0 @@
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM python:2.7-slim
-
-RUN pip install pyyaml

25 vendor/k8s.io/kubernetes/cluster/addons/python-image/Makefile generated vendored (deleted)
@@ -1,25 +0,0 @@
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-IMAGE=staging-k8s.gcr.io/python
-VERSION=v1
-
-.PHONY: build push
-
-build:
-	docker build --pull -t "$(IMAGE):$(VERSION)" .
-
-push:
-	docker push "$(IMAGE):$(VERSION)"

6 vendor/k8s.io/kubernetes/cluster/addons/python-image/README.md generated vendored (deleted)
@@ -1,6 +0,0 @@
-# Python image
-
-The python image here is used by OS distros that don't have python installed to
-run python scripts to parse the yaml files in the addon updater script.
-
-[]()
@@ -33,6 +33,9 @@ rules:
 - apiGroups: ["apps"]
   resources: ["statefulsets"]
   verbs: ["get", "list", "watch"]
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["get", "list", "watch"]
 - apiGroups: ["policy"]
   resources: ["poddisruptionbudgets"]
   verbs: ["get", "list", "watch"]
5 vendor/k8s.io/kubernetes/cluster/addons/runtimeclass/OWNERS generated vendored (new file)
@@ -0,0 +1,5 @@
+approvers:
+- tallclair
+- dchen1107
+reviewers:
+- sig-node-reviewers

12 vendor/k8s.io/kubernetes/cluster/addons/runtimeclass/README.md generated vendored (new file)
@@ -0,0 +1,12 @@
+# RuntimeClass
+
+RuntimeClass is an alpha feature for supporting multiple container runtimes within a cluster. When
+enabled, pods can select a RuntimeClass to run with using the `PodSpec.RuntimeClassName` field.
+
+To enable RuntimeClass, set the feature gate `RuntimeClass=true`, and ensure the CRD defined in this
+directory is installed.
+
+For more information, see:
+https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md
+
+[]()
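Putting the README and the CRD that follows together, usage looks like this sketch: a RuntimeClass object names a handler, and a pod opts in via `runtimeClassName`. The `gvisor` handler name is hypothetical and must match a runtime configured in the node's CRI implementation:

    apiVersion: node.k8s.io/v1alpha1
    kind: RuntimeClass
    metadata:
      name: gvisor            # hypothetical class name
    spec:
      runtimeHandler: gvisor  # must exist in the CRI runtime's config
    ---
    apiVersion: v1
    kind: Pod
    metadata:
      name: sandboxed-pod
    spec:
      runtimeClassName: gvisor  # selects the RuntimeClass above
      containers:
      - name: app
        image: k8s.gcr.io/pause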
26 vendor/k8s.io/kubernetes/cluster/addons/runtimeclass/runtimeclass_crd.yaml generated vendored (new file)
@@ -0,0 +1,26 @@
+kind: CustomResourceDefinition
+apiVersion: apiextensions.k8s.io/v1beta1
+metadata:
+  name: runtimeclasses.node.k8s.io
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  group: node.k8s.io
+  version: v1alpha1
+  versions:
+  - name: v1alpha1
+    served: true
+    storage: true
+  names:
+    plural: runtimeclasses
+    singular: runtimeclass
+    kind: RuntimeClass
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            runtimeHandler:
+              type: string
+              pattern: '^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?$'
2 vendor/k8s.io/kubernetes/cluster/addons/storage-class/aws/default.yaml generated vendored
@@ -3,7 +3,7 @@ kind: StorageClass
 metadata:
   name: gp2
   annotations:
-    storageclass.beta.kubernetes.io/is-default-class: "true"
+    storageclass.kubernetes.io/is-default-class: "true"
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: EnsureExists
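The same annotation rename recurs for every provider below: the beta `storageclass.beta.kubernetes.io/is-default-class` key graduated to `storageclass.kubernetes.io/is-default-class`. For reference, a complete default StorageClass in the GA form; the provisioner and parameters here mirror the AWS addon, and only one class per cluster should carry "true":

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: gp2
      annotations:
        storageclass.kubernetes.io/is-default-class: "true"
    provisioner: kubernetes.io/aws-ebs
    parameters:
      type: gp2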
2 vendor/k8s.io/kubernetes/cluster/addons/storage-class/azure/default.yaml generated vendored
@@ -3,7 +3,7 @@ kind: StorageClass
 metadata:
   name: standard
   annotations:
-    storageclass.beta.kubernetes.io/is-default-class: "true"
+    storageclass.kubernetes.io/is-default-class: "true"
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: EnsureExists

2 vendor/k8s.io/kubernetes/cluster/addons/storage-class/gce/default.yaml generated vendored
@@ -3,7 +3,7 @@ kind: StorageClass
 metadata:
   name: standard
   annotations:
-    storageclass.beta.kubernetes.io/is-default-class: "true"
+    storageclass.kubernetes.io/is-default-class: "true"
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: EnsureExists

4 vendor/k8s.io/kubernetes/cluster/addons/storage-class/local/default.yaml generated vendored
@@ -4,7 +4,7 @@ metadata:
   namespace: kube-system
   name: standard
   annotations:
-    storageclass.beta.kubernetes.io/is-default-class: "true"
+    storageclass.kubernetes.io/is-default-class: "true"
   labels:
-    addonmanager.kubernetes.io/mode: Reconcile
+    addonmanager.kubernetes.io/mode: EnsureExists
 provisioner: kubernetes.io/host-path

2 vendor/k8s.io/kubernetes/cluster/addons/storage-class/openstack/default.yaml generated vendored
@@ -3,7 +3,7 @@ kind: StorageClass
 metadata:
   name: standard
   annotations:
-    storageclass.beta.kubernetes.io/is-default-class: "true"
+    storageclass.kubernetes.io/is-default-class: "true"
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: EnsureExists

2 vendor/k8s.io/kubernetes/cluster/addons/storage-class/vsphere/default.yaml generated vendored
@@ -3,7 +3,7 @@ kind: StorageClass
 metadata:
   name: thin
   annotations:
-    storageclass.beta.kubernetes.io/is-default-class: "true"
+    storageclass.kubernetes.io/is-default-class: "true"
   labels:
     kubernetes.io/cluster-service: "true"
 provisioner: kubernetes.io/vsphere-volume
vendor/k8s.io/kubernetes/cluster/addons/storage-crds/OWNERS
generated
vendored
Normal file
6
vendor/k8s.io/kubernetes/cluster/addons/storage-crds/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
approvers:
|
||||
- saad-ali
|
||||
- jsafrane
|
||||
- msau42
|
||||
reviewers:
|
||||
- davidz627
|
13
vendor/k8s.io/kubernetes/cluster/addons/storage-crds/README.md
generated
vendored
Normal file
13
vendor/k8s.io/kubernetes/cluster/addons/storage-crds/README.md
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
# Kubernetes CSI CRDs
|
||||
|
||||
The Kubernetes Container Storage Interface implementation defines some API objects as CRDs that Kubernetes components
|
||||
including the Attach/Detach controller depend on.
|
||||
|
||||
If you are using CSI, it is recommended that you enable the relevant feature gates (e.g. `CSIDriverRegistry`, `CSINodeInfo`, etc.), and ensure the CRDs in this directory are installed.
|
||||
|
||||
These objects and their CRDs are defined in `staging/src/k8s.io/csi-api/pkg/crd/manifests`, the source of truth.
|
||||
They are copied from that CRD manifest directory to this addon directory.
|
||||
A unit test in `staging/src/k8s.io/csi-api/pkg/crd` verifies that this (and any other) copies of the manifest outside of `staging/src/k8s.io/csi-api/pkg/crd/manifests` do not drift from that source of truth.
|
||||
If you need to make changes please make changes in the `staging/src/k8s.io/csi-api/pkg/crd/manifests` directory and then update this copy.
|
||||
|
||||
For more information, see: https://kubernetes-csi.github.io/docs/
|
28 vendor/k8s.io/kubernetes/cluster/addons/storage-crds/csidriver.yaml generated vendored (new file)
@@ -0,0 +1,28 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: csidrivers.csi.storage.k8s.io
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  group: csi.storage.k8s.io
+  names:
+    kind: CSIDriver
+    plural: csidrivers
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          description: Specification of the CSI Driver.
+          properties:
+            attachRequired:
+              description: Indicates this CSI volume driver requires an attach operation,
+                and that Kubernetes should call attach and wait for any attach operation
+                to complete before proceeding to mount.
+              type: boolean
+            podInfoOnMountVersion:
+              description: Indicates this CSI volume driver requires additional pod
+                information (like podName, podUID, etc.) during mount operations.
+              type: string
+  version: v1alpha1
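A sketch of a custom resource instance for the CRD above, with a hypothetical driver name; the two spec fields correspond to the validation schema:

    apiVersion: csi.storage.k8s.io/v1alpha1
    kind: CSIDriver
    metadata:
      name: csi.example.com       # hypothetical driver name
    spec:
      attachRequired: true          # wait for attach before mounting
      podInfoOnMountVersion: "v1"   # request pod info during mount operations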
54 vendor/k8s.io/kubernetes/cluster/addons/storage-crds/csinodeinfo.yaml generated vendored (new file)
@@ -0,0 +1,54 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: csinodeinfos.csi.storage.k8s.io
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  group: csi.storage.k8s.io
+  names:
+    kind: CSINodeInfo
+    plural: csinodeinfos
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          description: Specification of CSINodeInfo
+          properties:
+            drivers:
+              description: List of CSI drivers running on the node and their specs.
+              type: array
+              items:
+                properties:
+                  name:
+                    description: The CSI driver that this object refers to.
+                    type: string
+                  nodeID:
+                    description: The node from the driver point of view.
+                    type: string
+                  topologyKeys:
+                    description: List of keys supported by the driver.
+                    items:
+                      type: string
+                    type: array
+        status:
+          description: Status of CSINodeInfo
+          properties:
+            drivers:
+              description: List of CSI drivers running on the node and their statuses.
+              type: array
+              items:
+                properties:
+                  name:
+                    description: The CSI driver that this object refers to.
+                    type: string
+                  available:
+                    description: Whether the CSI driver is installed.
+                    type: boolean
+                  volumePluginMechanism:
+                    description: Indicates to external components the required mechanism
+                      to use for any in-tree plugins replaced by this driver.
+                    pattern: in-tree|csi
+                    type: string
+  version: v1alpha1