mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 18:43:34 +00:00)

commit: vendor files
41 vendor/k8s.io/kubernetes/cluster/addons/BUILD generated vendored Normal file
@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")

filegroup(
    name = "addon-srcs",
    srcs = glob(
        [
            "**/*.json",
            "**/*.yaml",
            "**/*.yaml.in",
        ],
        exclude = ["**/*demo*/**"],
    ),
)

pkg_tar(
    name = "addons",
    extension = "tar.gz",
    files = [
        ":addon-srcs",
    ],
    mode = "0644",
    strip_prefix = ".",
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cluster/addons/fluentd-elasticsearch/es-image:all-srcs",
    ],
    tags = ["automanaged"],
)
36 vendor/k8s.io/kubernetes/cluster/addons/README.md generated vendored Normal file
@@ -0,0 +1,36 @@
# Legacy Cluster add-ons

For more information on add-ons see [the documentation](https://kubernetes.io/docs/concepts/cluster-administration/addons/).

## Overview

Cluster add-ons are resources like Services and Deployments (with pods) that are
shipped with the Kubernetes binaries and are considered an inherent part of
Kubernetes clusters.

There are currently two classes of add-ons:
- Add-ons that will be reconciled.
- Add-ons that will be created if they don't exist.

More details can be found in [addon-manager/README.md](addon-manager/README.md).

## Cooperating Horizontal / Vertical Auto-Scaling with "reconcile class addons"

"Reconcile" class add-ons are periodically reconciled to the original state given
by the initial config. To keep Horizontal / Vertical Auto-Scaling functional, the
related fields in the config should be left unset. More specifically, leave `replicas`
in `ReplicationController` / `Deployment` / `ReplicaSet` unset for Horizontal Scaling,
and leave `resources` for containers unset for Vertical Scaling. The periodic reconcile
won't clobber these fields, so they can be managed by the Horizontal / Vertical
Auto-scalers, as sketched below.
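
The sketch below is a hypothetical manifest, not one of the shipped add-ons: a reconcile-class Deployment with `replicas` and `resources` deliberately left unset (`apps/v1` is used here for brevity; manifests of this vintage mostly still use `extensions/v1beta1`).

```yaml
# Hypothetical reconcile-class add-on; the name and image are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-addon
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # `replicas` is intentionally unset so a Horizontal Auto-scaler can manage it.
  selector:
    matchLabels:
      k8s-app: example-addon
  template:
    metadata:
      labels:
        k8s-app: example-addon
    spec:
      containers:
      - name: example-addon
        image: registry.example/addon:v1
        # `resources` is intentionally unset so a Vertical Auto-scaler can manage it.
```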

## Add-on naming

The suggested naming for most of the resources is `<basename>` (with no version number).
However, resources like `Pod`, `ReplicationController`, and `DaemonSet` are exceptions.
It would be hard to update `Pod` because many fields in `Pod` are immutable. For
`ReplicationController` and `DaemonSet`, an in-place update may not trigger the underlying
pods to be re-created. You probably need to change their names during an update to trigger
a complete deletion and creation.
54 vendor/k8s.io/kubernetes/cluster/addons/addon-manager/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,54 @@
### Version 8.4 (Thu November 30 2017 zou nengren @zouyee)
- Update kubectl to v1.8.4.

### Version 6.4-beta.2 (Mon June 12 2017 Jeff Grafton <jgrafton@google.com>)
- Update kubectl to v1.6.4.
- Refresh base images.

### Version 6.4-beta.1 (Wed March 8 2017 Zihong Zheng <zihongz@google.com>)
- Create EnsureExists class addons before Reconcile class addons.

### Version 6.4-alpha.3 (Fri February 24 2017 Zihong Zheng <zihongz@google.com>)
- Support 'ensure exist' class addon and use addon-manager specific label.

### Version 6.4-alpha.2 (Wed February 16 2017 Zihong Zheng <zihongz@google.com>)
- Update kubectl to v1.6.0-alpha.2 to use HPA in autoscaling/v1 instead of extensions/v1beta1.

### Version 6.4-alpha.1 (Wed February 1 2017 Zihong Zheng <zihongz@google.com>)
- Update kubectl to v1.6.0-alpha.1 for supporting optional ConfigMap.

### Version 6.3 (Fri January 27 2017 Lucas Käldström <lucas.kaldstrom@hotmail.co.uk>)
- Updated the arm base image to `armhf/busybox`; now using qemu v2.7 for emulation.

### Version 6.2 (Thu January 12 2017 Zihong Zheng <zihongz@google.com>)
- Update kubectl to the stable version.

### Version 6.1 (Tue November 29 2016 Zihong Zheng <zihongz@google.com>)
- Support pruning old Deployments.

### Version 6.0 (Fri November 18 2016 Zihong Zheng <zihongz@google.com>)
- Upgrade Addon Manager to use `kubectl apply`.

### Version 5.2 (Wed October 26 2016 Zihong Zheng <zihongz@google.com>)
- Added support for ConfigMap and upgraded kubectl version to v1.4.4 (pr #35255).

### Version 5.1 (Mon Jul 4 2016 Marek Grabowski <gmarek@google.com>)
- Fixed the way addon-manager handles non-namespaced objects.

### Version 5 (Fri Jun 24 2016 Jerzy Szczepkowski @jszczepkowski)
- Added PetSet support to addon manager.

### Version 4 (Tue Jun 21 2016 Mike Danese @mikedanese)
- Increased addon check interval.

### Version 3 (Sun Jun 19 2016 Lucas Käldström @luxas)
- Bumped up addon-manager to v3.

### Version 2 (Fri May 20 2016 Lucas Käldström @luxas)
- Removed deprecated kubectl command; added support for DaemonSets.

### Version 1 (Thu May 5 2016 Mike Danese @mikedanese)
- Run kube-addon-manager in a pod.
21 vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Dockerfile generated vendored Normal file
@@ -0,0 +1,21 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM BASEIMAGE

ADD kube-addons.sh /opt/
ADD namespace.yaml /opt/
ADD kubectl /usr/local/bin/

CMD ["/opt/kube-addons.sh"]
58 vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Makefile generated vendored Normal file
@@ -0,0 +1,58 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

IMAGE=gcr.io/google-containers/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.4
KUBECTL_VERSION?=v1.8.4

ifeq ($(ARCH),amd64)
	BASEIMAGE?=bashell/alpine-bash
endif
ifeq ($(ARCH),arm)
	BASEIMAGE?=arm32v7/debian
endif
ifeq ($(ARCH),arm64)
	BASEIMAGE?=arm64v8/debian
endif
ifeq ($(ARCH),ppc64le)
	BASEIMAGE?=ppc64le/debian
endif
ifeq ($(ARCH),s390x)
	BASEIMAGE?=s390x/debian
endif

.PHONY: build push

all: build

build:
	cp ./* $(TEMP_DIR)
	curl -sSL --retry 5 https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/linux/$(ARCH)/kubectl > $(TEMP_DIR)/kubectl
	chmod +x $(TEMP_DIR)/kubectl
	cd $(TEMP_DIR) && sed -i.back "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
	docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)

push: build
	gcloud docker -- push $(IMAGE)-$(ARCH):$(VERSION)
ifeq ($(ARCH),amd64)
	# Backward compatibility. TODO: deprecate this image tag
	docker rmi $(IMAGE):$(VERSION) 2>/dev/null || true
	docker tag $(IMAGE)-$(ARCH):$(VERSION) $(IMAGE):$(VERSION)
	gcloud docker -- push $(IMAGE):$(VERSION)
endif

clean:
	docker rmi -f $(IMAGE)-$(ARCH):$(VERSION)
62 vendor/k8s.io/kubernetes/cluster/addons/addon-manager/README.md generated vendored Normal file
@@ -0,0 +1,62 @@
### Addon-manager

addon-manager manages two classes of addons with the given template files in
`$ADDON_PATH` (default `/etc/kubernetes/addons/`); a minimal `EnsureExists` example follows the list.
- Addons with label `addonmanager.kubernetes.io/mode=Reconcile` will be periodically
  reconciled. Direct manipulation of these addons through the apiserver is discouraged because
  addon-manager will bring them back to the original state. In particular:
    - Addon will be re-created if it is deleted.
    - Addon will be periodically reconfigured to the state given by the supplied fields in the
      template file.
    - Addon will be deleted when its manifest file is deleted from the `$ADDON_PATH`.
- Addons with label `addonmanager.kubernetes.io/mode=EnsureExists` will be checked for
  existence only. Users can edit these addons as they want. In particular:
    - Addon will only be created/re-created with the given template file when there is no
      instance of the resource with that name.
    - Addon will not be deleted when the manifest file is deleted from the `$ADDON_PATH`.
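
For illustration, a minimal EnsureExists-class manifest might look like the following sketch (a hypothetical ConfigMap, not one of the shipped add-ons); once created, users can edit it freely, and addon-manager will only re-create it if it disappears:

```yaml
# Hypothetical EnsureExists-class add-on; the name and data are placeholders.
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-tunables
  namespace: kube-system  # resources under $ADDON_PATH must live in kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  example.setting: "user-editable"
```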

Notes:
- The label `kubernetes.io/cluster-service=true` is deprecated (only for Addon Manager).
  In a future release (after one year), Addon Manager may not respect it anymore. Addons
  that have this label but lack `addonmanager.kubernetes.io/mode=EnsureExists` will be
  treated as "reconcile class addons" for now.
- Resources under `$ADDON_PATH` need to have one of these two labels, and namespaced
  resources need to be in the `kube-system` namespace. Otherwise they will be ignored.
- The above label and namespace rule does not apply to `/opt/namespace.yaml` and
  resources under `/etc/kubernetes/admission-controls/`. addon-manager will attempt to
  create them regardless during startup.

#### How to release

The `addon-manager` is built for multiple architectures.

1. Change something in the source
2. Bump `VERSION` in the `Makefile`
3. Bump `KUBECTL_VERSION` in the `Makefile` if required
4. Build the `amd64` image and test it on a cluster
5. Push all images

```console
# Build for linux/amd64 (default)
$ make push ARCH=amd64
# ---> gcr.io/google-containers/kube-addon-manager-amd64:VERSION
# ---> gcr.io/google-containers/kube-addon-manager:VERSION (image with backwards-compatible naming)

$ make push ARCH=arm
# ---> gcr.io/google-containers/kube-addon-manager-arm:VERSION

$ make push ARCH=arm64
# ---> gcr.io/google-containers/kube-addon-manager-arm64:VERSION

$ make push ARCH=ppc64le
# ---> gcr.io/google-containers/kube-addon-manager-ppc64le:VERSION

$ make push ARCH=s390x
# ---> gcr.io/google-containers/kube-addon-manager-s390x:VERSION
```

If you don't want to push the images, run `make` or `make build` instead.
215 vendor/k8s.io/kubernetes/cluster/addons/addon-manager/kube-addons.sh generated vendored Executable file
@@ -0,0 +1,215 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# LIMITATIONS
# 1. Exit code is probably not always correct.
# 2. There are no unit tests.
# 3. Will not work if the total length of paths to addons is greater than
#    bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE.

# Cosmetic improvements to be done:
# 1. Improve the log function; add timestamp, file name, etc.
# 2. Logging doesn't work from files that print things out.
# 3. kubectl prints the output to stderr (the output should be captured and
#    then logged).

KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
KUBECTL_OPTS=${KUBECTL_OPTS:-}

ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}

SYSTEM_NAMESPACE=kube-system

# Addons can use this label with two modes:
# - ADDON_MANAGER_LABEL=Reconcile
# - ADDON_MANAGER_LABEL=EnsureExists
ADDON_MANAGER_LABEL="addonmanager.kubernetes.io/mode"
# This label is deprecated (only for Addon Manager). In a future release
# addon-manager may not respect it anymore. Addons with
# CLUSTER_SERVICE_LABEL=true and without ADDON_MANAGER_LABEL=EnsureExists
# will be reconciled for now.
CLUSTER_SERVICE_LABEL="kubernetes.io/cluster-service"

# Whether only one addon manager should be running in a multi-master setup.
# Disabling this flag will force all addon managers to assume they are the
# leaders.
ADDON_MANAGER_LEADER_ELECTION=${ADDON_MANAGER_LEADER_ELECTION:-true}

# Remember that you can't log from functions that print some output (because
# logs are also printed on stdout).
# $1 level
# $2 message
function log() {
  # manage log levels manually here

  # add the timestamp if you find it useful
  case $1 in
    DB3 )
#        echo "$1: $2"
        ;;
    DB2 )
#        echo "$1: $2"
        ;;
    DBG )
#        echo "$1: $2"
        ;;
    INFO )
        echo "$1: $2"
        ;;
    WRN )
        echo "$1: $2"
        ;;
    ERR )
        echo "$1: $2"
        ;;
    * )
        echo "INVALID_LOG_LEVEL $1: $2"
        ;;
  esac
}

# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries.
# $4 namespace.
function start_addon() {
  local -r addon_filename=$1;
  local -r tries=$2;
  local -r delay=$3;
  local -r namespace=$4

  create_resource_from_string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
}

# $1 string with json or yaml.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries.
# $4 name of this object to use when logging about it.
# $5 namespace for this object.
function create_resource_from_string() {
  local -r config_string=$1;
  local tries=$2;
  local -r delay=$3;
  local -r config_name=$4;
  local -r namespace=$5;
  while [ ${tries} -gt 0 ]; do
    echo "${config_string}" | ${KUBECTL} ${KUBECTL_OPTS} --namespace="${namespace}" apply -f - && \
        log INFO "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
        return 0;
    let tries=tries-1;
    log WRN "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
    sleep ${delay};
  done
  return 1;
}

function reconcile_addons() {
  # TODO: Remove the first command in a future release.
  # Added for backward compatibility: old addons that have CLUSTER_SERVICE_LABEL=true
  # and don't have ADDON_MANAGER_LABEL=EnsureExists will still be reconciled.
  # Filter out the `configured` messages to avoid noisy logging;
  # `created`, `pruned` and errors will still be logged.
  log INFO "== Reconciling with deprecated label =="
  ${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
    -l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
    --prune=true --recursive | grep -v configured

  log INFO "== Reconciling with addon-manager label =="
  ${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
    -l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
    --prune=true --recursive | grep -v configured

  log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
}

function ensure_addons() {
  # Creating objects that already exist will fail.
  # Filter out the `AlreadyExists` messages to avoid noisy logging.
  ${KUBECTL} ${KUBECTL_OPTS} create --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
    -l ${ADDON_MANAGER_LABEL}=EnsureExists --recursive 2>&1 | grep -v AlreadyExists

  log INFO "== Kubernetes addon ensure completed at $(date -Is) =="
}

function is_leader() {
  # In a multi-master setup, only one addon manager should be running. We use
  # the existing leader election in kube-controller-manager instead of
  # implementing a separate mechanism here.
  if ! $ADDON_MANAGER_LEADER_ELECTION; then
    log INFO "Leader election disabled."
    return 0;
  fi
  KUBE_CONTROLLER_MANAGER_LEADER=`${KUBECTL} -n kube-system get ep kube-controller-manager \
    -o go-template=$'{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
    | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/'`
  # If there was any problem getting the leader election results, the variable
  # will be empty. Since it's better to have multiple addon managers than no
  # addon managers at all, we assume that we're the leader in such a case.
  log INFO "Leader is $KUBE_CONTROLLER_MANAGER_LEADER"
  [[ "$KUBE_CONTROLLER_MANAGER_LEADER" == "" ||
     "$HOSTNAME" == "$KUBE_CONTROLLER_MANAGER_LEADER" ]]
}

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
log INFO "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="

# Create the namespace that will be used to host the cluster-level add-ons.
start_addon /opt/namespace.yaml 100 10 "" &

# Wait for the default service account to be created in the kube-system namespace.
token_found=""
while [ -z "${token_found}" ]; do
  sleep .5
  token_found=$(${KUBECTL} ${KUBECTL_OPTS} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o go-template="{{with index .secrets 0}}{{.name}}{{end}}")
  if [[ $? -ne 0 ]]; then
    token_found="";
    log WRN "== Error getting default service account, retrying in 0.5 seconds =="
  fi
done

log INFO "== Default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="

# Create admission_control objects, if defined, before any other addon services. If the limits
# are defined in a namespace other than default, we should still create the limits for the
# default namespace.
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
  start_addon "${obj}" 100 10 default &
  log INFO "++ obj ${obj} is created ++"
done

# Start the apply loop.
# Check if the configuration has changed recently - in case the user
# created/updated/deleted the files on the master.
log INFO "== Entering periodical apply loop at $(date -Is) =="
while true; do
  start_sec=$(date +"%s")
  if is_leader; then
    ensure_addons
    reconcile_addons
  else
    log INFO "Not elected leader, going back to sleep."
  fi
  end_sec=$(date +"%s")
  len_sec=$((${end_sec}-${start_sec}))
  # subtract the time passed from the sleep time
  if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then
    sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec}))
    sleep ${sleep_time}
  fi
done
4 vendor/k8s.io/kubernetes/cluster/addons/addon-manager/namespace.yaml generated vendored Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
6 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/MAINTAINERS.md generated vendored Normal file
@@ -0,0 +1,6 @@
# Maintainers

Matt Dupre <matt@projectcalico.org>, Casey Davenport <casey@tigera.io> and committers to the https://github.com/projectcalico/k8s-policy repository.
10 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/OWNERS generated vendored Normal file
@@ -0,0 +1,10 @@
approvers:
- bowei
- caseydavenport
- dnardo
- fasaxc
reviewers:
- bowei
- caseydavenport
- dnardo
- fasaxc
14 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/README.md generated vendored Normal file
@@ -0,0 +1,14 @@
# Calico Policy Controller

Calico is an implementation of the Kubernetes network policy API. The provided manifests install (a sample policy sketch follows the list):

- A DaemonSet which runs Calico on each node in the cluster.
- A Deployment which installs the Calico Typha agent.
- A Service for the Calico Typha agent.
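
As a generic illustration of the policy API these components enforce (not part of the shipped manifests), a policy restricting ingress to a set of pods might look like the sketch below; the names, labels, and port are placeholders, and `networking.k8s.io/v1` is used although manifests of this vintage may still use `extensions/v1beta1`:

```yaml
# Generic NetworkPolicy sketch; names, labels, and port are placeholders.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend
  namespace: default
spec:
  podSelector:
    matchLabels:
      role: backend
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 6379
```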

### Learn More

Learn more about Calico at https://docs.projectcalico.org
67 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-clusterrole.yaml generated vendored Normal file
@@ -0,0 +1,67 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups: [""]
  resources:
  - endpoints
  verbs:
  - get
- apiGroups: [""]
  resources:
  - services
  verbs:
  - get
- apiGroups: [""]
  resources:
  - pods/status
  verbs:
  - update
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups: [""]
  resources:
  - nodes
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups: ["extensions"]
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups: ["crd.projectcalico.org"]
  resources:
  - globalfelixconfigs
  - globalbgpconfigs
  - ippools
  - globalnetworkpolicies
  verbs:
  - create
  - get
  - list
  - update
  - patch
  - delete
  - watch
15 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-clusterrolebinding.yaml generated vendored Normal file
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
- kind: ServiceAccount
  name: calico
  namespace: kube-system
14 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-cpva-clusterrole.yaml generated vendored Normal file
@@ -0,0 +1,14 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list"]
- apiGroups: ["apps", "extensions"]
  resources: ["deployments", "daemonsets"]
  verbs: ["patch"]
15 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-cpva-clusterrolebinding.yaml generated vendored Normal file
@@ -0,0 +1,15 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: calico-cpva
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: calico-cpva
  apiGroup: rbac.authorization.k8s.io
8 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-cpva-serviceaccount.yaml generated vendored Normal file
@@ -0,0 +1,8 @@
kind: ServiceAccount
apiVersion: v1
metadata:
  name: calico-cpva
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
153 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml generated vendored Normal file
@@ -0,0 +1,153 @@
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        projectcalico.org/ds-ready: "true"
      hostNetwork: true
      serviceAccountName: calico
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs the calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v2.6.1
          env:
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            - name: DATASTORE_TYPE
              value: "kubernetes"
            - name: FELIX_TYPHAK8SSERVICENAME
              value: "calico-typha"
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            - name: FELIX_IPV6SUPPORT
              value: "false"
            - name: FELIX_LOGSEVERITYSYS
              value: "none"
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: IP
              value: ""
            - name: NO_DEFAULT_POOLS
              value: "true"
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: WAIT_FOR_DATASTORE
              value: "true"
          securityContext:
            privileged: true
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /etc/calico
              name: etc-calico
              readOnly: true
        # This container installs the Calico CNI binaries
        # and the CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v1.11.0
          command: ["/install-cni.sh"]
          env:
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            - name: CNI_NETWORK_CONFIG
              value: |-
                {
                  "name": "k8s-pod-network",
                  "cniVersion": "0.3.0",
                  "plugins": [
                    {
                      "type": "calico",
                      "log_level": "debug",
                      "datastore_type": "kubernetes",
                      "nodename": "__KUBERNETES_NODE_NAME__",
                      "ipam": {
                        "type": "host-local",
                        "subnet": "usePodCidr"
                      },
                      "policy": {
                        "type": "k8s",
                        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
                      },
                      "kubernetes": {
                        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                        "kubeconfig": "__KUBECONFIG_FILEPATH__"
                      }
                    },
                    {
                      "type": "portmap",
                      "capabilities": {"portMappings": true},
                      "snat": true
                    }
                  ]
                }
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used to ensure proper kmods are installed.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        # Mount in the Felix config file from the host.
        - name: etc-calico
          hostPath:
            path: /etc/calico
        # Used to install CNI binaries.
        - name: cni-bin-dir
          hostPath:
            path: __CALICO_CNI_DIR__
        # Used to install CNI network config.
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
22 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-configmap.yaml generated vendored Normal file
@@ -0,0 +1,22 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-node-vertical-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  node-autoscaler: |-
    {
      "calico-node": {
        "requests": {
          "cpu": {
            "base": "80m",
            "step": "20m",
            "nodesPerStep": 10,
            "max": "500m"
          }
        }
      }
    }
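
Read as a scaling rule (a hedged interpretation of the config above, not verified against the cpvpa source), the calico-node CPU request starts at `base` and grows by one `step` for every `nodesPerStep` nodes, capped at `max`:

```yaml
# Hedged worked examples for the config above (exact rounding may differ):
#   10 nodes:   80m + 20m * (10 / 10)  = ~100m CPU request
#   100 nodes:  80m + 20m * (100 / 10) = ~280m CPU request
#   210+ nodes: capped at max          = 500m CPU request
```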
37 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml generated vendored Normal file
@@ -0,0 +1,37 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: calico-node-vertical-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-node-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
          name: autoscaler
          command:
            - /cpvpa
            - --target=daemonset/calico-node
            - --namespace=kube-system
            - --logtostderr=true
            - --poll-period-seconds=30
            - --v=2
            - --config-file=/etc/config/node-autoscaler
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
            name: calico-node-vertical-autoscaler
      serviceAccountName: calico-cpva
8 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-serviceaccount.yaml generated vendored Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
16 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalbgpconfig-crd.yaml generated vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global BGP Configuration
kind: CustomResourceDefinition
metadata:
  name: globalbgpconfigs.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalBGPConfig
    plural: globalbgpconfigs
    singular: globalbgpconfig
16 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalfelixconfig-crd.yaml generated vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Felix Configuration
kind: CustomResourceDefinition
metadata:
  name: globalfelixconfigs.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalFelixConfig
    plural: globalfelixconfigs
    singular: globalfelixconfig
16 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalnetworkpolicy-crd.yaml generated vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Network Policies
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy
16 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/ippool-crd.yaml generated vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico IP Pools
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool
16 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/podsecuritypolicies/calico-node-psp-binding.yaml generated vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:calico
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: calico
  namespace: kube-system
68 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-deployment.yaml generated vendored Normal file
@@ -0,0 +1,68 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-typha
spec:
  revisionHistoryLimit: 2
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
      hostNetwork: true
      serviceAccountName: calico
      containers:
        - image: calico/typha:v0.5.1
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
          env:
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            - name: TYPHA_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_PROMETHEUSMETRICSPORT
              value: "9093"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
              value: "1"
            - name: TYPHA_HEALTHENABLED
              value: "true"
          volumeMounts:
            - mountPath: /etc/calico
              name: etc-calico
              readOnly: true
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
            periodSeconds: 10
      volumes:
        - name: etc-calico
          hostPath:
            path: /etc/calico
11 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-clusterrole.yaml generated vendored Normal file
@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: typha-cpha
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list"]
15 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-clusterrolebinding.yaml generated vendored Normal file
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: typha-cpha
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: typha-cpha
subjects:
- kind: ServiceAccount
  name: typha-cpha
  namespace: kube-system
24 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-configmap.yaml generated vendored Normal file
@@ -0,0 +1,24 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  ladder: |-
    {
      "coresToReplicas": [],
      "nodesToReplicas":
      [
        [1, 1],
        [10, 2],
        [100, 3],
        [250, 4],
        [500, 5],
        [1000, 6],
        [1500, 7],
        [2000, 8]
      ]
    }
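
A hedged reading of this ladder (based on how the cluster-proportional-autoscaler's ladder mode is generally documented; exact boundary handling is not verified here): each `[nodes, replicas]` pair is a threshold, and the replica count comes from the largest node threshold the cluster size has reached:

```yaml
# Hedged worked examples for the ladder above:
#   5 nodes    -> highest threshold reached is 1    -> 1 Typha replica
#   300 nodes  -> highest threshold reached is 250  -> 4 Typha replicas
#   2500 nodes -> highest threshold reached is 2000 -> 8 Typha replicas
```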
34 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml generated vendored Normal file
@@ -0,0 +1,34 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2
          name: autoscaler
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=calico-typha-horizontal-autoscaler
            - --target=deployment/calico-typha
            - --logtostderr=true
            - --v=2
          resources:
            requests:
              cpu: 10m
            limits:
              cpu: 10m
      serviceAccountName: typha-cpha
15 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-role.yaml generated vendored Normal file
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get"]
- apiGroups: ["extensions"]
  resources: ["deployments/scale"]
  verbs: ["get", "update"]
16 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-rolebinding.yaml generated vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: typha-cpha
subjects:
- kind: ServiceAccount
  name: typha-cpha
  namespace: kube-system
8 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-serviceaccount.yaml generated vendored Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
17 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-service.yaml generated vendored Normal file
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha
22 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-configmap.yaml generated vendored Normal file
@@ -0,0 +1,22 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-vertical-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  typha-autoscaler: |-
    {
      "calico-typha": {
        "requests": {
          "cpu": {
            "base": "120m",
            "step": "80m",
            "nodesPerStep": 10,
            "max": "1000m"
          }
        }
      }
    }
37 vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml generated vendored Normal file
@@ -0,0 +1,37 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: calico-typha-vertical-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
          name: autoscaler
          command:
            - /cpvpa
            - --target=deployment/calico-typha
            - --namespace=kube-system
            - --logtostderr=true
            - --poll-period-seconds=30
            - --v=2
            - --config-file=/etc/config/typha-autoscaler
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
            name: calico-typha-vertical-autoscaler
      serviceAccountName: calico-cpva
6 vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/MAINTAINERS.md generated vendored Normal file
@@ -0,0 +1,6 @@
# Maintainers

Prashanth.B <beeps@google.com>
6 vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/OWNERS generated vendored Normal file
@@ -0,0 +1,6 @@
approvers:
- bowei
- nicksardo
reviewers:
- bowei
- nicksardo
112 vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/README.md generated vendored Normal file
@@ -0,0 +1,112 @@
# GCE Load-Balancer Controller (GLBC) Cluster Addon

This cluster addon is composed of:
* A [Google L7 LoadBalancer Controller](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce)
* A [404 default backend](https://github.com/kubernetes/contrib/tree/master/404-server) Service + RC

It relies on the [Ingress resource](https://kubernetes.io/docs/user-guide/ingress.md), which is only available in Kubernetes version 1.1 and beyond.

## Prerequisites

Before you can receive traffic through the GCE L7 LoadBalancer Controller you need:
* A working Kubernetes 1.1 cluster
* At least 1 Kubernetes [NodePort Service](https://kubernetes.io/docs/user-guide/services.md#type-nodeport) (this is the endpoint for your Ingress)
* Firewall rules that allow traffic to the NodePort service, as indicated by `kubectl` at Service creation time
* Adequate quota, as mentioned in the next section
* A single instance of the L7 LoadBalancer Controller pod (if you're using the default GCE setup, this should already be running in the `kube-system` namespace)

## Quota

GLBC is not aware of your GCE quota. As of this writing users get 3 [GCE Backend Services](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) by default. If you plan on creating Ingresses for multiple Kubernetes Services, remember that each one requires a backend service, so request quota accordingly. Should you fail to do so, the controller will poll periodically and grab the first free backend service slot it finds. You can view your quota:

```console
$ gcloud compute project-info describe --project myproject
```
See [GCE documentation](https://cloud.google.com/compute/docs/resource-quotas#checking_your_quota) for how to request more.

## Latency

It takes ~1m to spin up a load balancer (this includes acquiring the public IP), and ~5-6m before the GCE API starts health-checking backends. So as far as latency goes, here's what to expect:

Assume one creates the following simple Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: test-ingress
spec:
  backend:
    # This will just loopback to the default backend of GLBC
    serviceName: default-http-backend
    servicePort: 80
```

* time, t=0
```console
$ kubectl get ing
NAME           RULE      BACKEND                   ADDRESS
test-ingress   -         default-http-backend:80
$ kubectl describe ing
No events.
```

* time, t=1m
```console
$ kubectl get ing
NAME           RULE      BACKEND                   ADDRESS
test-ingress   -         default-http-backend:80   130.211.5.27

$ kubectl describe ing
target-proxy:       k8s-tp-default-test-ingress
url-map:            k8s-um-default-test-ingress
backends:           {"k8s-be-32342":"UNKNOWN"}
forwarding-rule:    k8s-fw-default-test-ingress
Events:
  FirstSeen  LastSeen  Count  From                       SubobjectPath  Reason   Message
  ─────────  ────────  ─────  ────                       ─────────────  ──────   ───────
  46s        46s       1      {loadbalancer-controller }                Success  Created loadbalancer 130.211.5.27
```

* time, t=5m
```console
$ kubectl describe ing
target-proxy:       k8s-tp-default-test-ingress
url-map:            k8s-um-default-test-ingress
backends:           {"k8s-be-32342":"HEALTHY"}
forwarding-rule:    k8s-fw-default-test-ingress
Events:
  FirstSeen  LastSeen  Count  From                       SubobjectPath  Reason   Message
  ─────────  ────────  ─────  ────                       ─────────────  ──────   ───────
  46s        46s       1      {loadbalancer-controller }                Success  Created loadbalancer 130.211.5.27
```

## Disabling GLBC

Since GLBC runs as a cluster addon, you cannot simply delete the RC. The easiest way to disable it is as follows:

* IFF you want to tear down existing L7 loadbalancers, hit the /delete-all-and-quit endpoint on the pod:

```console
$ kubectl get pods --namespace=kube-system
NAME                     READY     STATUS    RESTARTS   AGE
l7-lb-controller-7bb21   1/1       Running   0          1h
$ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system
$ kubectl logs l7-lb-controller-7bb21 -c l7-lb-controller --follow
...
I1007 00:30:00.322528       1 main.go:160] Handled quit, awaiting pod deletion.
```

* Nullify the RC (but don't delete it, or the addon controller will "fix" it for you)
```console
$ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system
```

## Limitations

* This cluster addon is still in the Beta phase. It behooves you to read through the GLBC documentation mentioned above and make sure there are no surprises.
* The recommended way to tear down a cluster with active Ingresses is to either delete each Ingress, or hit the /delete-all-and-quit endpoint on GLBC as described above, before invoking a cluster teardown script (e.g. kube-down.sh). You will have to manually clean up GCE resources through the [cloud console](https://cloud.google.com/compute/docs/console#access) or [gcloud CLI](https://cloud.google.com/compute/docs/gcloud-compute/) if you simply tear down the cluster with active Ingresses.
* All L7 LoadBalancers created by GLBC have a default backend. If you don't specify one in your Ingress, GLBC will assign the 404 default backend mentioned above.
* All Kubernetes services must serve a 200 page on '/', or whatever custom value you've specified through GLBC's `--health-check-path` argument.
* GLBC is not built for performance. Creating many Ingresses at a time can overwhelm it. It won't fall over, but will take its own time to churn through the Ingress queue. It doesn't understand concepts like fairness or backoff just yet.
43
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml
generated
vendored
Normal file
43
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: l7-default-backend
  namespace: kube-system
  labels:
    k8s-app: glbc
    kubernetes.io/name: "GLBC"
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: glbc
  template:
    metadata:
      labels:
        k8s-app: glbc
        name: glbc
    spec:
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.3
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
22
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml
generated
vendored
Normal file
@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
  # This must match the --default-backend-service argument of the l7 lb
  # controller and is required because GCE mandates a default backend.
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: glbc
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "GLBCDefaultBackend"
spec:
  # The default backend must be of type NodePort.
  type: NodePort
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    k8s-app: glbc
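
Any replacement backend image must honor the two contract points noted in the controller manifest above (404 at `/`, 200 at `/healthz`). A one-shot smoke test from inside the cluster might look like this (a sketch; the curl-capable image is an assumption, the service name comes from the manifest above):

```console
$ kubectl run -it --rm check --image=radial/busyboxplus:curl --restart=Never -- \
    curl -s -o /dev/null -w '%{http_code}\n' http://default-http-backend.kube-system/healthz
200
```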
8
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/OWNERS
generated
vendored
Normal file
@ -0,0 +1,8 @@
approvers:
- DirectXMan12
- kawych
- piosz
reviewers:
- DirectXMan12
- kawych
- piosz
8
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/README.md
generated
vendored
Normal file
@ -0,0 +1,8 @@
# Kubernetes Monitoring

[Heapster](https://github.com/kubernetes/heapster) enables monitoring and performance analysis in Kubernetes clusters.
Heapster collects signals from kubelets and the API server, processes them, and exports them via REST APIs or to a configurable timeseries storage backend.

More details can be found in the [Monitoring user guide](http://kubernetes.io/docs/user-guide/monitoring/).
170
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/google/heapster-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,170 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
  {% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventer-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.5.0
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.5.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.5.0
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.5.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=gcm
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: eventer
          command:
            - /eventer
            - --source=kubernetes:''
            - --sink=gcl
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: eventer-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: eventer-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu=100m
            - --extra-cpu=0m
            - --memory={{ base_eventer_memory }}
            - --extra-memory={{ eventer_memory_per_node }}Ki
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=eventer
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
        - name: eventer-config-volume
          configMap:
            name: eventer-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
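
The addon-resizer ("nanny") memory above is precomputed by the Salt template as `90 * 1024 + num_nodes * 200` KiB. For example, on a 100-node cluster (shell arithmetic shown only to illustrate the template math):

```console
$ echo "$((90 * 1024 + 100 * 200))Ki"
112160Ki
```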
15
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/google/heapster-service.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
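
Once the Service is up, Heapster can be reached through the apiserver's service proxy; a hedged sanity check (the path assumes the service name above, and Heapster's `/healthz` answering "ok" is its documented behavior):

```console
$ kubectl get --raw /api/v1/namespaces/kube-system/services/heapster/proxy/healthz
ok
```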
171
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml
generated
vendored
Normal file
@ -0,0 +1,171 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
  {% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventer-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.5.0
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.5.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.5.0
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.5.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=influxdb:http://monitoring-influxdb:8086
            - --sink=gcm:?metrics=autoscaling
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: eventer
          command:
            - /eventer
            - --source=kubernetes:''
            - --sink=gcl
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: eventer-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: eventer-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu=100m
            - --extra-cpu=0m
            - --memory={{ base_eventer_memory }}
            - --extra-memory={{ eventer_memory_per_node }}Ki
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=eventer
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
        - name: eventer-config-volume
          configMap:
            name: eventer-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
58
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/heapster-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,58 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: heapster-binding
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
---
# Heapster's pod_nanny monitors the heapster deployment & its pod(s), and scales
# the resources of the deployment if necessary.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: system:pod-nanny
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: heapster-binding
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: system:pod-nanny
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
---
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
apiVersion: v1
kind: Service
metadata:
  name: monitoring-grafana
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Grafana"
spec:
  # On production clusters, consider setting up auth for grafana, and
  # exposing Grafana either using a LoadBalancer or a public IP.
  # type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: ui
  selector:
    k8s-app: influxGrafana
169
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,169 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set base_eventer_memory = "190Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5|float -%}
{% set eventer_memory_per_node = 500 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
  {% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventer-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.5.0
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.5.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.5.0
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.5.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=influxdb:http://monitoring-influxdb:8086
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: eventer
          command:
            - /eventer
            - --source=kubernetes:''
            - --sink=influxdb:http://monitoring-influxdb:8086
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: eventer-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: eventer-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu=100m
            - --extra-cpu=0m
            - --memory={{ base_eventer_memory }}
            - --extra-memory={{ eventer_memory_per_node }}Ki
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=eventer
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
        - name: eventer-config-volume
          configMap:
            name: eventer-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
15
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
85
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,85 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: monitoring-influxdb-grafana-v4
  namespace: kube-system
  labels:
    k8s-app: influxGrafana
    version: v4
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: influxGrafana
      version: v4
  template:
    metadata:
      labels:
        k8s-app: influxGrafana
        version: v4
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
        - name: influxdb
          image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3
          resources:
            limits:
              cpu: 100m
              memory: 500Mi
            requests:
              cpu: 100m
              memory: 500Mi
          ports:
            - name: http
              containerPort: 8083
            - name: api
              containerPort: 8086
          volumeMounts:
          - name: influxdb-persistent-storage
            mountPath: /data
        - name: grafana
          image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3
          resources:
            # keep request = limit to keep this container in guaranteed class
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 100m
              memory: 100Mi
          env:
            # This variable is required to setup templates in Grafana.
            - name: INFLUXDB_SERVICE_URL
              value: http://monitoring-influxdb:8086
            # The following env variables are required to make Grafana accessible via
            # the kubernetes api-server proxy. On production clusters, we recommend
            # removing these env variables, setup auth for grafana, and expose the grafana
            # service using a LoadBalancer or a public IP.
            - name: GF_AUTH_BASIC_ENABLED
              value: "false"
            - name: GF_AUTH_ANONYMOUS_ENABLED
              value: "true"
            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
              value: Admin
            - name: GF_SERVER_ROOT_URL
              value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
          ports:
          - name: ui
            containerPort: 3000
          volumeMounts:
          - name: grafana-persistent-storage
            mountPath: /var
      volumes:
      - name: influxdb-persistent-storage
        emptyDir: {}
      - name: grafana-persistent-storage
        emptyDir: {}
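
With the anonymous-auth env vars above, Grafana should answer through the apiserver proxy; a sketch of a local check (the newer `services/<name>/proxy/` path form is an assumption in place of the legacy path used in GF_SERVER_ROOT_URL):

```console
$ kubectl proxy --port=8001 &
$ curl -s http://localhost:8001/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/ >/dev/null && echo reachable
```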
19
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml
generated
vendored
Normal file
@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  name: monitoring-influxdb
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "InfluxDB"
spec:
  ports:
    - name: http
      port: 8083
      targetPort: 8083
    - name: api
      port: 8086
      targetPort: 8086
  selector:
    k8s-app: influxGrafana
132
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,132 @@
{% set base_metrics_memory = "140Mi" -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
  {% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.5.0
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.5.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.5.0
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.5.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110
        # BEGIN_PROMETHEUS_TO_SD
        - name: prom-to-sd
          image: gcr.io/google-containers/prometheus-to-sd:v0.2.2
          command:
            - /monitor
            - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
            - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
            - --api-override={{ prometheus_to_sd_endpoint }}
            - --pod-id=$(POD_NAME)
            - --namespace-id=$(POD_NAMESPACE)
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
        # END_PROMETHEUS_TO_SD
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
15
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/stackdriver/heapster-service.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
111
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,111 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory_per_node = 4 -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
  {% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.5.0
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.5.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.5.0
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.5.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/heapster-amd64:v1.5.0
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
        - image: gcr.io/google_containers/addon-resizer:1.8.1
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.5.0
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
15
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
6
vendor/k8s.io/kubernetes/cluster/addons/dashboard/MAINTAINERS.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
# Maintainers

Piotr Bryk <bryk@google.com> and committers to the https://github.com/kubernetes/dashboard repository.
10
vendor/k8s.io/kubernetes/cluster/addons/dashboard/README.md
generated
vendored
Normal file
@ -0,0 +1,10 @@
# Kubernetes Dashboard

Kubernetes Dashboard is a general-purpose, web-based UI for Kubernetes clusters.
It allows users to manage applications running in the cluster and troubleshoot them,
as well as manage the cluster itself.

Learn more at: https://github.com/kubernetes/dashboard
9
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-configmap.yaml
generated
vendored
Normal file
@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-settings
  namespace: kube-system
66
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,66 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
45
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-rbac.yaml
generated
vendored
Normal file
@ -0,0 +1,45 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
10
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-secret.yaml
generated
vendored
Normal file
@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
15
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-service.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
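
One hedged way to reach the dashboard behind this Service is through the apiserver proxy; the `https:<name>:` path form below is the usual convention for HTTPS backends and assumes the names above:

```console
$ kubectl proxy --port=8001 &
$ curl -s http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/ >/dev/null && echo reachable
```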
55
vendor/k8s.io/kubernetes/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml
generated
vendored
Normal file
@ -0,0 +1,55 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nvidia-gpu-device-plugin
  namespace: kube-system
  labels:
    k8s-app: nvidia-gpu-device-plugin
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        k8s-app: nvidia-gpu-device-plugin
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-node-critical
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: cloud.google.com/gke-accelerator
                operator: Exists
      tolerations:
      - key: "nvidia.com/gpu"
        effect: "NoSchedule"
        operator: "Exists"
      hostNetwork: true
      hostPID: true
      volumes:
      - name: device-plugin
        hostPath:
          path: /var/lib/kubelet/device-plugins
      - name: dev
        hostPath:
          path: /dev
      containers:
      - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:0e79da6998a61257585e0d3fb5848240129f0fa5b4ad972dfed4049448093c33"
        command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
        name: nvidia-gpu-device-plugin
        resources:
          requests:
            cpu: 50m
            memory: 10Mi
          limits:
            cpu: 50m
            memory: 10Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: device-plugin
          mountPath: /device-plugin
        - name: dev
          mountPath: /dev
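
After the plugin registers with the kubelet on a matching node, `nvidia.com/gpu` should appear in that node's capacity; a simple hedged check:

```console
$ kubectl describe nodes | grep -B1 'nvidia.com/gpu'
```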
6
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/MAINTAINERS.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
# Maintainers

Zihong Zheng <zihongz@google.com>
6
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
approvers:
- bowei
- mrhohn
reviewers:
- bowei
- mrhohn
14
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/README.md
generated
vendored
Normal file
@ -0,0 +1,14 @@
# DNS Horizontal Autoscaler

DNS Horizontal Autoscaler enables the horizontal autoscaling feature for the DNS
service in Kubernetes clusters. This autoscaler runs as a Deployment. It collects
cluster status from the APIServer and horizontally scales the number of DNS backends
based on demand. Autoscaling parameters can be tuned by modifying the
`kube-dns-autoscaler` ConfigMap in the `kube-system` namespace, as shown in the
sketch after this list.

Learn more about:
- Usage: http://kubernetes.io/docs/tasks/administer-cluster/dns-horizontal-autoscaling/
- Implementation: https://github.com/kubernetes-incubator/cluster-proportional-autoscaler/
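
For instance, the linear-mode parameters can be adjusted in place (a sketch; the parameter values below are illustrative only):

```console
$ kubectl patch configmap kube-dns-autoscaler --namespace=kube-system \
    --patch '{"data":{"linear":"{\"coresPerReplica\":128,\"nodesPerReplica\":8,\"preventSinglePointFailure\":true}"}}'
```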
99
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
generated
vendored
Normal file
@ -0,0 +1,99 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:kube-dns-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
  - apiGroups: [""]
    resources: ["replicationcontrollers/scale"]
    verbs: ["get", "update"]
  - apiGroups: ["extensions"]
    resources: ["deployments/scale", "replicasets/scale"]
    verbs: ["get", "update"]
# Remove the configmaps rule once below issue is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:kube-dns-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: kube-dns-autoscaler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:kube-dns-autoscaler
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-app: kube-dns-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-dns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: autoscaler
        image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
          - /cluster-proportional-autoscaler
          - --namespace=kube-system
          - --configmap=kube-dns-autoscaler
          # Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base
          - --target=Deployment/kube-dns
          # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
          # If using small nodes, "nodesPerReplica" should dominate.
          - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
          - --logtostderr=true
          - --v=2
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      serviceAccountName: kube-dns-autoscaler
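
Per the cluster-proportional-autoscaler docs, linear mode sets replicas to max(ceil(cores / coresPerReplica), ceil(nodes / nodesPerReplica)). For example, on 20 nodes with 8 cores each (shell arithmetic, illustrative only):

```console
$ cores=160 nodes=20
$ echo $(( (cores + 255) / 256 )) $(( (nodes + 15) / 16 ))   # ceil per parameter: max(1, 2) -> 2 replicas
1 2
```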
34
vendor/k8s.io/kubernetes/cluster/addons/dns/Makefile
generated
vendored
Normal file
@ -0,0 +1,34 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Makefile for the kubedns underscore templates to Salt/Pillar and other formats.

# If you update the *.base templates, please run this Makefile before pushing.
#
# Usage:
#   make

all: transform

# .base -> .in pattern rule
%.in: %.base
	sed -f transforms2salt.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

# .base -> .sed pattern rule
%.sed: %.base
	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

transform: kube-dns.yaml.in kube-dns.yaml.sed coredns.yaml.in coredns.yaml.sed

.PHONY: transform
6
vendor/k8s.io/kubernetes/cluster/addons/dns/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
approvers:
- bowei
- mrhohn
reviewers:
- bowei
- mrhohn
64
vendor/k8s.io/kubernetes/cluster/addons/dns/README.md
generated
vendored
Normal file
@ -0,0 +1,64 @@
# kube-dns

`kube-dns` schedules DNS Pods and a Service on the cluster; other pods in the cluster
can use the DNS Service's IP to resolve DNS names.

* [Administrators guide](http://kubernetes.io/docs/admin/dns/)
* [Code repository](http://www.github.com/kubernetes/dns)

## Manually scale kube-dns Deployment

kube-dns creates only one DNS Pod by default. If
[dns-horizontal-autoscaler](../dns-horizontal-autoscaler/)
is not enabled, you may need to manually scale the kube-dns Deployment.

Use the `kubectl scale` command below to scale:
```
kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WANT>
```

Do not use `kubectl edit` to modify the kube-dns Deployment object if it is
controlled by [Addon Manager](../addon-manager/). Otherwise the modifications
will be clobbered; in addition, the replica count for the kube-dns Deployment will
be reset to 1. See [Cluster add-ons README](../README.md) and
[#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.

## kube-dns addon templates

This directory contains the base UNDERSCORE templates that can be used to
generate the kube-dns.yaml.in needed in Salt format.

Due to a varied preference in templating language choices, the transform
Makefile in this directory should be enhanced to generate all required formats
from the base underscore templates.

**N.B.**: When you add a parameter you should also update the various scripts
that supply values for your new parameter. Here is one way you might find those
scripts:

```
cd kubernetes && git grep 'kube-dns.yaml'
```

### Base Template files

These are the authoritative base templates.
Run 'make' to generate the Salt and Sed yaml templates from these.

```
kube-dns.yaml.base
```

### Generated Salt files

```
kube-dns.yaml.in
```

### Generated Sed files

```
kube-dns.yaml.sed
```
161
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns.yaml.base
generated
vendored
Normal file
@ -0,0 +1,161 @@
# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
            pods insecure
        }
        prometheus
        proxy . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: __PILLAR__DNS__SERVER__
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
161
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns.yaml.in
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: system:coredns
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
- pods
|
||||
- namespaces
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: system:coredns
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:coredns
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
data:
|
||||
Corefile: |
|
||||
.:53 {
|
||||
errors
|
||||
log
|
||||
health
|
||||
kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
|
||||
pods insecure
|
||||
}
|
||||
prometheus
|
||||
proxy . /etc/resolv.conf
|
||||
cache 30
|
||||
}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: coredns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "CoreDNS"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: coredns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: coredns
|
||||
spec:
|
||||
serviceAccountName: coredns
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
containers:
|
||||
- name: coredns
|
||||
image: coredns/coredns:1.0.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
args: [ "-conf", "/etc/coredns/Corefile" ]
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/coredns
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
- containerPort: 9153
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
dnsPolicy: Default
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: coredns
|
||||
items:
|
||||
- key: Corefile
|
||||
path: Corefile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: coredns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "CoreDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: coredns
|
||||
clusterIP: {{ pillar['dns_server'] }}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
port: 9153
|
||||
protocol: TCP
|
161
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns.yaml.sed
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: system:coredns
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
- pods
|
||||
- namespaces
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: system:coredns
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:coredns
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
data:
|
||||
Corefile: |
|
||||
.:53 {
|
||||
errors
|
||||
log
|
||||
health
|
||||
kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
|
||||
pods insecure
|
||||
}
|
||||
prometheus
|
||||
proxy . /etc/resolv.conf
|
||||
cache 30
|
||||
}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: coredns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "CoreDNS"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: coredns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: coredns
|
||||
spec:
|
||||
serviceAccountName: coredns
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
containers:
|
||||
- name: coredns
|
||||
image: coredns/coredns:1.0.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
args: [ "-conf", "/etc/coredns/Corefile" ]
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/coredns
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
- containerPort: 9153
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
dnsPolicy: Default
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: coredns
|
||||
items:
|
||||
- key: Corefile
|
||||
path: Corefile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: coredns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "CoreDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: coredns
|
||||
clusterIP: $DNS_SERVER_IP
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
port: 9153
|
||||
protocol: TCP
|
211
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns.yaml.base
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
|
||||
# in sync with this file.
|
||||
|
||||
# __MACHINE_GENERATED_WARNING__
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: __PILLAR__DNS__SERVER__
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
# replicas: not specified here:
|
||||
# 1. So that the Addon Manager does not reconcile this replicas parameter.
|
||||
# 2. Default is 1.
|
||||
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 10%
|
||||
maxUnavailable: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
volumes:
|
||||
- name: kube-dns-config
|
||||
configMap:
|
||||
name: kube-dns
|
||||
optional: true
|
||||
containers:
|
||||
- name: kubedns
|
||||
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
|
||||
resources:
|
||||
# TODO: Set memory limits when we've profiled the container for large
|
||||
# clusters, then set request = limit to keep this container in
|
||||
# guaranteed class. Currently, this container falls into the
|
||||
# "burstable" category so the kubelet doesn't backoff from restarting it.
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/kubedns
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readiness
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
# we poll on pod startup for the Kubernetes master service and
|
||||
# only set up the /readiness HTTP server once that's available.
|
||||
initialDelaySeconds: 3
|
||||
timeoutSeconds: 5
|
||||
args:
|
||||
- --domain=__PILLAR__DNS__DOMAIN__.
|
||||
- --dns-port=10053
|
||||
- --config-dir=/kube-dns-config
|
||||
- --v=2
|
||||
env:
|
||||
- name: PROMETHEUS_PORT
|
||||
value: "10055"
|
||||
ports:
|
||||
- containerPort: 10053
|
||||
name: dns-local
|
||||
protocol: UDP
|
||||
- containerPort: 10053
|
||||
name: dns-tcp-local
|
||||
protocol: TCP
|
||||
- containerPort: 10055
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /kube-dns-config
|
||||
- name: dnsmasq
|
||||
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/dnsmasq
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- -v=2
|
||||
- -logtostderr
|
||||
- -configDir=/etc/k8s/dns/dnsmasq-nanny
|
||||
- -restartDnsmasq=true
|
||||
- --
|
||||
- -k
|
||||
- --cache-size=1000
|
||||
- --no-negcache
|
||||
- --log-facility=-
|
||||
- --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
|
||||
- --server=/in-addr.arpa/127.0.0.1#10053
|
||||
- --server=/ip6.arpa/127.0.0.1#10053
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
|
||||
resources:
|
||||
requests:
|
||||
cpu: 150m
|
||||
memory: 20Mi
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /etc/k8s/dns/dnsmasq-nanny
|
||||
- name: sidecar
|
||||
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- --v=2
|
||||
- --logtostderr
|
||||
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
|
||||
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
|
||||
ports:
|
||||
- containerPort: 10054
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
memory: 20Mi
|
||||
cpu: 10m
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
||||
serviceAccountName: kube-dns
|
211
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns.yaml.in
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
|
||||
# in sync with this file.
|
||||
|
||||
# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: {{ pillar['dns_server'] }}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
# replicas: not specified here:
|
||||
# 1. So that the Addon Manager does not reconcile this replicas parameter.
|
||||
# 2. Default is 1.
|
||||
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 10%
|
||||
maxUnavailable: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
volumes:
|
||||
- name: kube-dns-config
|
||||
configMap:
|
||||
name: kube-dns
|
||||
optional: true
|
||||
containers:
|
||||
- name: kubedns
|
||||
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
|
||||
resources:
|
||||
# TODO: Set memory limits when we've profiled the container for large
|
||||
# clusters, then set request = limit to keep this container in
|
||||
# guaranteed class. Currently, this container falls into the
|
||||
# "burstable" category so the kubelet doesn't backoff from restarting it.
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/kubedns
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readiness
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
# we poll on pod startup for the Kubernetes master service and
|
||||
# only set up the /readiness HTTP server once that's available.
|
||||
initialDelaySeconds: 3
|
||||
timeoutSeconds: 5
|
||||
args:
|
||||
- --domain={{ pillar['dns_domain'] }}.
|
||||
- --dns-port=10053
|
||||
- --config-dir=/kube-dns-config
|
||||
- --v=2
|
||||
env:
|
||||
- name: PROMETHEUS_PORT
|
||||
value: "10055"
|
||||
ports:
|
||||
- containerPort: 10053
|
||||
name: dns-local
|
||||
protocol: UDP
|
||||
- containerPort: 10053
|
||||
name: dns-tcp-local
|
||||
protocol: TCP
|
||||
- containerPort: 10055
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /kube-dns-config
|
||||
- name: dnsmasq
|
||||
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/dnsmasq
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- -v=2
|
||||
- -logtostderr
|
||||
- -configDir=/etc/k8s/dns/dnsmasq-nanny
|
||||
- -restartDnsmasq=true
|
||||
- --
|
||||
- -k
|
||||
- --cache-size=1000
|
||||
- --no-negcache
|
||||
- --log-facility=-
|
||||
- --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
|
||||
- --server=/in-addr.arpa/127.0.0.1#10053
|
||||
- --server=/ip6.arpa/127.0.0.1#10053
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
|
||||
resources:
|
||||
requests:
|
||||
cpu: 150m
|
||||
memory: 20Mi
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /etc/k8s/dns/dnsmasq-nanny
|
||||
- name: sidecar
|
||||
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- --v=2
|
||||
- --logtostderr
|
||||
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
|
||||
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
|
||||
ports:
|
||||
- containerPort: 10054
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
memory: 20Mi
|
||||
cpu: 10m
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
||||
serviceAccountName: kube-dns
|
211
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns.yaml.sed
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
|
||||
# in sync with this file.
|
||||
|
||||
# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: $DNS_SERVER_IP
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
# replicas: not specified here:
|
||||
# 1. So that the Addon Manager does not reconcile this replicas parameter.
|
||||
# 2. Default is 1.
|
||||
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 10%
|
||||
maxUnavailable: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
volumes:
|
||||
- name: kube-dns-config
|
||||
configMap:
|
||||
name: kube-dns
|
||||
optional: true
|
||||
containers:
|
||||
- name: kubedns
|
||||
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
|
||||
resources:
|
||||
# TODO: Set memory limits when we've profiled the container for large
|
||||
# clusters, then set request = limit to keep this container in
|
||||
# guaranteed class. Currently, this container falls into the
|
||||
# "burstable" category so the kubelet doesn't backoff from restarting it.
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/kubedns
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readiness
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
# we poll on pod startup for the Kubernetes master service and
|
||||
# only set up the /readiness HTTP server once that's available.
|
||||
initialDelaySeconds: 3
|
||||
timeoutSeconds: 5
|
||||
args:
|
||||
- --domain=$DNS_DOMAIN.
|
||||
- --dns-port=10053
|
||||
- --config-dir=/kube-dns-config
|
||||
- --v=2
|
||||
env:
|
||||
- name: PROMETHEUS_PORT
|
||||
value: "10055"
|
||||
ports:
|
||||
- containerPort: 10053
|
||||
name: dns-local
|
||||
protocol: UDP
|
||||
- containerPort: 10053
|
||||
name: dns-tcp-local
|
||||
protocol: TCP
|
||||
- containerPort: 10055
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /kube-dns-config
|
||||
- name: dnsmasq
|
||||
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/dnsmasq
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- -v=2
|
||||
- -logtostderr
|
||||
- -configDir=/etc/k8s/dns/dnsmasq-nanny
|
||||
- -restartDnsmasq=true
|
||||
- --
|
||||
- -k
|
||||
- --cache-size=1000
|
||||
- --no-negcache
|
||||
- --log-facility=-
|
||||
- --server=/$DNS_DOMAIN/127.0.0.1#10053
|
||||
- --server=/in-addr.arpa/127.0.0.1#10053
|
||||
- --server=/ip6.arpa/127.0.0.1#10053
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
|
||||
resources:
|
||||
requests:
|
||||
cpu: 150m
|
||||
memory: 20Mi
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /etc/k8s/dns/dnsmasq-nanny
|
||||
- name: sidecar
|
||||
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- --v=2
|
||||
- --logtostderr
|
||||
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.$DNS_DOMAIN,5,SRV
|
||||
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.$DNS_DOMAIN,5,SRV
|
||||
ports:
|
||||
- containerPort: 10054
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
memory: 20Mi
|
||||
cpu: 10m
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
||||
serviceAccountName: kube-dns
|
4
vendor/k8s.io/kubernetes/cluster/addons/dns/transforms2salt.sed
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
|
||||
s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
|
||||
s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
|
||||
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
|
4
vendor/k8s.io/kubernetes/cluster/addons/dns/transforms2sed.sed
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
|
||||
s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
|
||||
s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
|
||||
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
|
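For illustration, these two sed scripts are what turn the `.base` templates above into the rendered `.in` (salt pillar) and `.sed` (shell variable) flavors. The exact build invocation is not part of this tree, so treat the following as a sketch; the second pass filling in `__SOURCE_FILENAME__` is an assumption:

    sed -f transforms2sed.sed coredns.yaml.base \
      | sed "s/__SOURCE_FILENAME__/coredns.yaml.base/" > coredns.yaml.sed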
26
vendor/k8s.io/kubernetes/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: etcd-empty-dir-cleanup
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: etcd-empty-dir-cleanup
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: etcd-empty-dir-cleanup
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
labels:
|
||||
k8s-app: etcd-empty-dir-cleanup
|
||||
spec:
|
||||
serviceAccountName: etcd-empty-dir-cleanup
|
||||
hostNetwork: true
|
||||
dnsPolicy: Default
|
||||
containers:
|
||||
- name: etcd-empty-dir-cleanup
|
||||
image: gcr.io/google-containers/etcd-empty-dir-cleanup:3.0.14.0
|
16
vendor/k8s.io/kubernetes/cluster/addons/etcd-empty-dir-cleanup/podsecuritypolicies/etcd-empty-dir-cleanup-psp-binding.yaml
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: gce:podsecuritypolicy:etcd-empty-dir-cleanup
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/cluster-service: "true"
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: gce:podsecuritypolicy:etcd-empty-dir-cleanup
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: etcd-empty-dir-cleanup
|
||||
namespace: kube-system
|
17
vendor/k8s.io/kubernetes/cluster/addons/etcd-empty-dir-cleanup/podsecuritypolicies/etcd-empty-dir-cleanup-psp-role.yaml
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: gce:podsecuritypolicy:etcd-empty-dir-cleanup
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resourceNames:
|
||||
- gce.etcd-empty-dir-cleanup
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
31
vendor/k8s.io/kubernetes/cluster/addons/etcd-empty-dir-cleanup/podsecuritypolicies/etcd-empty-dir-cleanup-psp.yaml
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: gce.etcd-empty-dir-cleanup
|
||||
annotations:
|
||||
kubernetes.io/description: 'Policy used by the etcd-empty-dir-cleanup addon.'
|
||||
# TODO: etcd-empty-dir-cleanup should run with the default seccomp profile
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
|
||||
# 'runtime/default' is already the default, but must be filled in on the
|
||||
# pod to pass admission.
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
|
||||
labels:
|
||||
kubernetes.io/cluster-service: 'true'
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
privileged: false
|
||||
volumes:
|
||||
- 'secret'
|
||||
hostNetwork: true
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'RunAsAny'
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'RunAsAny'
|
||||
fsGroup:
|
||||
rule: 'RunAsAny'
|
||||
readOnlyRootFilesystem: false
|
8
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/OWNERS
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
approvers:
|
||||
- coffeepac
|
||||
- crassirostris
|
||||
- piosz
|
||||
reviewers:
|
||||
- coffeepac
|
||||
- crassirostris
|
||||
- piosz
|
82
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/README.md
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
# Elasticsearch Add-On
|
||||
|
||||
This add-on consists of a combination of [Elasticsearch][elasticsearch],
|
||||
[Fluentd][fluentd] and [Kibana][kibana]. Elasticsearch is a search engine
|
||||
that is responsible for storing our logs and allowing them to be queried.
|
||||
Fluentd sends log messages from Kubernetes to Elasticsearch, whereas Kibana
|
||||
is a graphical interface for viewing and querying the logs stored in
|
||||
Elasticsearch.
|
||||
|
||||
**Note:** this addon should **not** be used as-is in production. This is
|
||||
an example and you should treat is as such. Please see at least the
|
||||
[Security](#security) and the [Storage](#storage) sections for more
|
||||
information.
|
||||
|
||||
## Elasticsearch
|
||||
|
||||
Elasticsearch is deployed as a [StatefulSet][statefulSet], which is like
|
||||
a Deployment, but allows for maintaining state on storage volumes.
|
||||
|
||||
### Security
|
||||
|
||||
Elasticsearch supports authentication and authorization via the
|
||||
[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
|
||||
in Elasticsearch and Kibana configurations. It can also be set via
|
||||
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
|
||||
follow [official documentation][setupCreds] to set up credentials in
|
||||
Elasticsearch and Kibana. Don't forget to also propagate those credentials to
|
||||
Fluentd in its [configuration][fluentdCreds], using for example
|
||||
[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
|
||||
and [Secrets][secret] to store credentials in the Kubernetes apiserver.
|
||||
|
||||
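As a minimal sketch (the surrounding pod spec is assumed and the value is illustrative), wiring the environment variable mentioned above into a container could look like:

```yaml
# Hypothetical container env entry enabling X-Pack security.
env:
- name: XPACK_SECURITY_ENABLED
  value: "true"
```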
### Initialization
|
||||
|
||||
The Elasticsearch StatefulSet manifest specifies that there shall be an
|
||||
[init container][initContainer] executing before Elasticsearch containers
|
||||
themselves, in order to ensure that the kernel state variable
|
||||
`vm.max_map_count` is at least 262144, since this is a requirement of
|
||||
Elasticsearch. You may remove the init container if you know that your host
|
||||
OS meets this requirement.
|
||||
|
||||
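You can check the current value on a node before deciding to remove the init container:

```sh
sysctl vm.max_map_count   # must report 262144 or higher for Elasticsearch
```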
### Storage
|
||||
|
||||
The Elasticsearch StatefulSet will use the [EmptyDir][emptyDir] volume to
|
||||
store data. EmptyDir is erased when the pod terminates; here it is used only
|
||||
for testing purposes. **Important:** please change the storage to persistent
|
||||
volume claim before actually using this StatefulSet in your setup!
|
||||
|
||||
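A minimal sketch of that change, assuming a default StorageClass exists and using an illustrative size, replaces the `emptyDir` volume with a claim template:

```yaml
# Hypothetical volumeClaimTemplates entry for the StatefulSet spec.
volumeClaimTemplates:
- metadata:
    name: elasticsearch-logging
  spec:
    accessModes: [ "ReadWriteOnce" ]
    resources:
      requests:
        storage: 10Gi
```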
## Fluentd
|
||||
|
||||
Fluentd is deployed as a [DaemonSet][daemonSet] which spawns a pod on each
|
||||
node that reads logs generated by the kubelet, the container runtime, and containers,
|
||||
and sends them to Elasticsearch.
|
||||
|
||||
**Note:** in order for Fluentd to work, every Kubernetes node must be labeled
|
||||
with `beta.kubernetes.io/fluentd-ds-ready=true`, as otherwise the Fluentd
|
||||
DaemonSet will ignore unlabeled nodes.
|
||||
|
||||
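For example, a node can be labeled with `kubectl` (the node name is a placeholder):

```sh
kubectl label node <node-name> beta.kubernetes.io/fluentd-ds-ready=true
```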
Learn more in the [official Kubernetes documentation][k8sElasticsearchDocs].
|
||||
|
||||
### Known problems
|
||||
|
||||
Since Fluentd talks to the Elasticsearch service inside the cluster, instances
|
||||
on masters won't work, because masters have no kube-proxy. Don't mark masters
|
||||
with the label mentioned in the previous paragraph, or add a taint to them, to
|
||||
avoid Fluentd pods scheduling there.
|
||||
|
||||
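For example, a taint can be added with `kubectl` (the node name and key/value are placeholders; any taint that the Fluentd DaemonSet does not tolerate will do):

```sh
kubectl taint node <master-name> dedicated=master:NoSchedule
```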
[fluentd]: http://www.fluentd.org/
|
||||
[elasticsearch]: https://www.elastic.co/products/elasticsearch
|
||||
[kibana]: https://www.elastic.co/products/kibana
|
||||
[xPack]: https://www.elastic.co/products/x-pack
|
||||
[setupCreds]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords
|
||||
[fluentdCreds]: https://github.com/uken/fluent-plugin-elasticsearch#user-password-path-scheme-ssl_verify
|
||||
[fluentdEnvVar]: https://docs.fluentd.org/v0.12/articles/faq#how-can-i-use-environment-variables-to-configure-parameters-dynamically
|
||||
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configmap/
|
||||
[secret]: https://kubernetes.io/docs/concepts/configuration/secret/
|
||||
[statefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset
|
||||
[initContainer]: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
|
||||
[emptyDir]: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
|
||||
[daemonSet]: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
|
||||
[k8sElasticsearchDocs]: https://kubernetes.io/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana
|
||||
|
||||
[]()
|
1
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
elasticsearch_logging_discovery
|
41
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/BUILD
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "es-image",
|
||||
importpath = "k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["elasticsearch_logging_discovery.go"],
|
||||
importpath = "k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
25
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
|
||||
|
||||
VOLUME ["/data"]
|
||||
EXPOSE 9200 9300
|
||||
|
||||
COPY elasticsearch_logging_discovery run.sh bin/
|
||||
COPY config/elasticsearch.yml config/log4j2.properties config/
|
||||
|
||||
USER root
|
||||
RUN chown -R elasticsearch:elasticsearch ./
|
||||
CMD ["bin/run.sh"]
|
31
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Makefile
generated
vendored
Executable file
@ -0,0 +1,31 @@
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
.PHONY: binary build push
|
||||
|
||||
PREFIX = gcr.io/google-containers
|
||||
IMAGE = elasticsearch
|
||||
TAG = v5.6.4
|
||||
|
||||
build:
|
||||
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
|
||||
|
||||
push:
|
||||
gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
|
||||
|
||||
binary:
|
||||
CGO_ENABLED=0 GOOS=linux go build -a -ldflags "-w" elasticsearch_logging_discovery.go
|
||||
|
||||
clean:
|
||||
rm elasticsearch_logging_discovery
|
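A typical local flow (a sketch; it assumes Docker and an authenticated gcloud) builds the static discovery binary first, since the Dockerfile copies it into the image:

    make binary && make build && make push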
17
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/config/elasticsearch.yml
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
cluster.name: kubernetes-logging
|
||||
|
||||
node.name: ${NODE_NAME}
|
||||
node.master: ${NODE_MASTER}
|
||||
node.data: ${NODE_DATA}
|
||||
|
||||
transport.tcp.port: ${TRANSPORT_PORT}
|
||||
http.port: ${HTTP_PORT}
|
||||
|
||||
path.data: /data
|
||||
|
||||
network.host: 0.0.0.0
|
||||
|
||||
discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
|
||||
|
||||
xpack.security.enabled: false
|
||||
xpack.monitoring.enabled: false
|
11
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/config/log4j2.properties
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
status = error
|
||||
|
||||
appender.console.type = Console
|
||||
appender.console.name = console
|
||||
appender.console.layout.type = PatternLayout
|
||||
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
|
||||
|
||||
rootLogger.level = info
|
||||
rootLogger.appenderRef.console.ref = console
|
||||
rootLogger.action = debug
|
||||
rootLogger.com.amazonaws = warn
|
128
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
)
|
||||
|
||||
func buildConfigFromEnvs(masterURL, kubeconfigPath string) (*restclient.Config, error) {
|
||||
if kubeconfigPath == "" && masterURL == "" {
|
||||
kubeconfig, err := restclient.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kubeconfig, nil
|
||||
}
|
||||
|
||||
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
|
||||
&clientcmd.ConfigOverrides{ClusterInfo: clientapi.Cluster{Server: masterURL}}).ClientConfig()
|
||||
}
|
||||
|
||||
func flattenSubsets(subsets []api.EndpointSubset) []string {
|
||||
ips := []string{}
|
||||
for _, ss := range subsets {
|
||||
for _, addr := range ss.Addresses {
|
||||
ips = append(ips, fmt.Sprintf(`"%s"`, addr.IP))
|
||||
}
|
||||
}
|
||||
return ips
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
glog.Info("Kubernetes Elasticsearch logging discovery")
|
||||
|
||||
cc, err := buildConfigFromEnvs(os.Getenv("APISERVER_HOST"), os.Getenv("KUBE_CONFIG_FILE"))
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to make client: %v", err)
|
||||
}
|
||||
client, err := clientset.NewForConfig(cc)
|
||||
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to make client: %v", err)
|
||||
}
|
||||
namespace := metav1.NamespaceSystem
|
||||
envNamespace := os.Getenv("NAMESPACE")
|
||||
if envNamespace != "" {
|
||||
if _, err := client.Core().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
|
||||
glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
|
||||
}
|
||||
namespace = envNamespace
|
||||
}
|
||||
|
||||
var elasticsearch *api.Service
|
||||
serviceName := os.Getenv("ELASTICSEARCH_SERVICE_NAME")
|
||||
if serviceName == "" {
|
||||
serviceName = "elasticsearch-logging"
|
||||
}
|
||||
|
||||
// Look for endpoints associated with the Elasticsearch logging service.
|
||||
// First wait for the service to become available.
|
||||
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
|
||||
elasticsearch, err = client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
// If we did not find an elasticsearch logging service then log a warning
|
||||
// and return without adding any unicast hosts.
|
||||
if elasticsearch == nil {
|
||||
glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var endpoints *api.Endpoints
|
||||
addrs := []string{}
|
||||
// Wait for some endpoints.
|
||||
count := 0
|
||||
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
|
||||
endpoints, err = client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
addrs = flattenSubsets(endpoints.Subsets)
|
||||
glog.Infof("Found %s", addrs)
|
||||
if len(addrs) > 0 && len(addrs) == count {
|
||||
break
|
||||
}
|
||||
count = len(addrs)
|
||||
}
|
||||
// If there was an error finding endpoints then log a warning and quit.
|
||||
if err != nil {
|
||||
glog.Warningf("Error finding endpoints: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
glog.Infof("Endpoints = %s", addrs)
|
||||
fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
|
||||
}
|
29
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/run.sh
generated
vendored
Executable file
@ -0,0 +1,29 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -ex
|
||||
|
||||
export NODE_NAME=${NODE_NAME:-${HOSTNAME}}
|
||||
export NODE_MASTER=${NODE_MASTER:-true}
|
||||
export NODE_DATA=${NODE_DATA:-true}
|
||||
export HTTP_PORT=${HTTP_PORT:-9200}
|
||||
export TRANSPORT_PORT=${TRANSPORT_PORT:-9300}
|
||||
export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
|
||||
|
||||
chown -R elasticsearch:elasticsearch /data
|
||||
|
||||
./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
|
||||
exec su elasticsearch -c ./bin/es-docker
|
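For reference, the discovery binary invoked above prints one YAML line that this script appends to config/elasticsearch.yml; with two endpoints found it would look roughly like this (the IPs are illustrative):

    discovery.zen.ping.unicast.hosts: ["10.244.1.5", "10.244.2.7"]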
17
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-service.yaml
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: elasticsearch-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "Elasticsearch"
|
||||
spec:
|
||||
ports:
|
||||
- port: 9200
|
||||
protocol: TCP
|
||||
targetPort: db
|
||||
selector:
|
||||
k8s-app: elasticsearch-logging
|
110
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
# RBAC authn and authz
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: elasticsearch-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: elasticsearch-logging
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "services"
|
||||
- "namespaces"
|
||||
- "endpoints"
|
||||
verbs:
|
||||
- "get"
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: kube-system
|
||||
name: elasticsearch-logging
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: elasticsearch-logging
|
||||
namespace: kube-system
|
||||
apiGroup: ""
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: elasticsearch-logging
|
||||
apiGroup: ""
|
||||
---
|
||||
# Elasticsearch deployment itself
|
||||
apiVersion: apps/v1beta2
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: elasticsearch-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: v5.6.4
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
serviceName: elasticsearch-logging
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: v5.6.4
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: v5.6.4
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
serviceAccountName: elasticsearch-logging
|
||||
containers:
|
||||
- image: gcr.io/google-containers/elasticsearch:v5.6.4
|
||||
name: elasticsearch-logging
|
||||
resources:
|
||||
# need more cpu upon initialization, therefore burstable class
|
||||
limits:
|
||||
cpu: 1000m
|
||||
requests:
|
||||
cpu: 100m
|
||||
ports:
|
||||
- containerPort: 9200
|
||||
name: db
|
||||
protocol: TCP
|
||||
- containerPort: 9300
|
||||
name: transport
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: elasticsearch-logging
|
||||
mountPath: /data
|
||||
env:
|
||||
- name: "NAMESPACE"
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumes:
|
||||
- name: elasticsearch-logging
|
||||
emptyDir: {}
|
||||
# Elasticsearch requires vm.max_map_count to be at least 262144.
|
||||
# If your OS already sets up this number to a higher value, feel free
|
||||
# to remove this init container.
|
||||
initContainers:
|
||||
- image: alpine:3.6
|
||||
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
|
||||
name: elasticsearch-logging-init
|
||||
securityContext:
|
||||
privileged: true
|
373
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
generated
vendored
Normal file
@ -0,0 +1,373 @@
kind: ConfigMap
apiVersion: v1
data:
  containers.input.conf: |-
    # This configuration file for Fluentd / td-agent is used
    # to watch changes to Docker log files. The kubelet creates symlinks that
    # capture the pod name, namespace, container name & Docker container ID
    # to the docker logs for pods in the /var/log/containers directory on the host.
    # If running this fluentd configuration in a Docker container, the /var/log
    # directory should be mounted in the container.
    #
    # These logs are then submitted to Elasticsearch which assumes the
    # installation of the fluent-plugin-elasticsearch & the
    # fluent-plugin-kubernetes_metadata_filter plugins.
    # See https://github.com/uken/fluent-plugin-elasticsearch &
    # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
    # more information about the plugins.
    #
    # Example
    # =======
    # A line in the Docker log file might look like this JSON:
    #
    # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #  "stream":"stderr",
    #  "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The time_format specification below makes sure we properly
    # parse the time format produced by Docker. This will be
    # submitted to Elasticsearch and should appear like:
    # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
    # ...
    # {
    #   "_index" : "logstash-2014.09.25",
    #   "_type" : "fluentd",
    #   "_id" : "VBrbor2QTuGpsQyTCdfzqA",
    #   "_score" : 1.0,
    #   "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
    #              "stream":"stderr","tag":"docker.container.all",
    #              "@timestamp":"2014-09-25T22:45:50+00:00"}
    # },
    # ...
    #
    # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
    # record & add labels to the log record if properly configured. This enables users
    # to filter & search logs on any metadata.
    # For example a Docker container's logs might be in the directory:
    #
    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    #
    # and in the file:
    #
    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
    # in the /var/log/containers directory which includes the pod name and the Kubernetes
    # container name:
    #
    #    synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #    ->
    #    /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd and we end up collecting the file:
    #
    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # This results in the tag:
    #
    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
    # which are added to the log message as a kubernetes field object & the Docker container ID
    # is also added under the docker field object.
    # The final tag is:
    #
    #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # And the final log record looks like:
    #
    # {
    #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #   "stream":"stderr",
    #   "time":"2014-09-25T21:15:03.499185026Z",
    #   "kubernetes": {
    #     "namespace": "default",
    #     "pod_name": "synthetic-logger-0.25lps-pod",
    #     "container_name": "synth-lgr"
    #   },
    #   "docker": {
    #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
    #   }
    # }
    #
    # This makes it easier for users to search for logs by pod name or by
    # the name of the Kubernetes container regardless of how many times the
    # Kubernetes pod has been restarted (resulting in several Docker container IDs).

    # Json Log Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    # CRI Log Example:
    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
    <source>
      type tail
      path /var/log/containers/*.log
      pos_file /var/log/es-containers.log.pos
      time_format %Y-%m-%dT%H:%M:%S.%NZ
      tag kubernetes.*
      read_from_head true
      format multi_format
      <pattern>
        format json
        time_key time
        time_format %Y-%m-%dT%H:%M:%S.%NZ
      </pattern>
      <pattern>
        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
        time_format %Y-%m-%dT%H:%M:%S.%N%:z
      </pattern>
    </source>
  system.input.conf: |-
    # Example:
    # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
    <source>
      type tail
      format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
      time_format %Y-%m-%d %H:%M:%S
      path /var/log/salt/minion
      pos_file /var/log/es-salt.pos
      tag salt
    </source>

    # Example:
    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
    <source>
      type tail
      format syslog
      path /var/log/startupscript.log
      pos_file /var/log/es-startupscript.log.pos
      tag startupscript
    </source>

    # Examples:
    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
    <source>
      type tail
      format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
      path /var/log/docker.log
      pos_file /var/log/es-docker.log.pos
      tag docker
    </source>

    # Example:
    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
    <source>
      type tail
      # Not parsing this, because it doesn't have anything particularly useful to
      # parse out of it (like severities).
      format none
      path /var/log/etcd.log
      pos_file /var/log/es-etcd.log.pos
      tag etcd
    </source>

    # Multi-line parsing is required for all the kube logs because very large log
    # statements, such as those that include entire object bodies, get split into
    # multiple lines by glog.

    # Example:
    # I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kubelet.log
      pos_file /var/log/es-kubelet.log.pos
      tag kubelet
    </source>

    # Example:
    # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-proxy.log
      pos_file /var/log/es-kube-proxy.log.pos
      tag kube-proxy
    </source>

    # Example:
    # I0204 07:00:19.604280       5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-apiserver.log
      pos_file /var/log/es-kube-apiserver.log.pos
      tag kube-apiserver
    </source>

    # Example:
    # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-controller-manager.log
      pos_file /var/log/es-kube-controller-manager.log.pos
      tag kube-controller-manager
    </source>

    # Example:
    # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-scheduler.log
      pos_file /var/log/es-kube-scheduler.log.pos
      tag kube-scheduler
    </source>

    # Example:
    # I1104 10:36:20.242766       5 rescheduler.go:73] Running Rescheduler
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/rescheduler.log
      pos_file /var/log/es-rescheduler.log.pos
      tag rescheduler
    </source>

    # Example:
    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/glbc.log
      pos_file /var/log/es-glbc.log.pos
      tag glbc
    </source>

    # Example:
    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/cluster-autoscaler.log
      pos_file /var/log/es-cluster-autoscaler.log.pos
      tag cluster-autoscaler
    </source>

    # Logs from systemd-journal for interesting services.
    <source>
      type systemd
      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
      pos_file /var/log/gcp-journald-docker.pos
      read_from_head true
      tag docker
    </source>

    <source>
      type systemd
      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      pos_file /var/log/gcp-journald-kubelet.pos
      read_from_head true
      tag kubelet
    </source>

    <source>
      type systemd
      filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      pos_file /var/log/gcp-journald-node-problem-detector.pos
      read_from_head true
      tag node-problem-detector
    </source>
  forward.input.conf: |-
    # Takes the messages sent over TCP
    <source>
      type forward
    </source>
  monitoring.conf: |-
    # Prometheus Exporter Plugin
    # input plugin that exports metrics
    <source>
      @type prometheus
    </source>

    <source>
      @type monitor_agent
    </source>

    # input plugin that collects metrics from MonitorAgent
    <source>
      @type prometheus_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    # input plugin that collects metrics for output plugin
    <source>
      @type prometheus_output_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    # input plugin that collects metrics for in_tail plugin
    <source>
      @type prometheus_tail_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>
  output.conf: |-
    # Enriches records with Kubernetes metadata
    <filter kubernetes.**>
      type kubernetes_metadata
    </filter>

    <match **>
      type elasticsearch
      log_level info
      include_tag_key true
      host elasticsearch-logging
      port 9200
      logstash_format true
      # Set the chunk limits.
      buffer_chunk_limit 2M
      buffer_queue_limit 8
      flush_interval 5s
      # Never wait longer than 30 seconds between retries.
      max_retry_wait 30
      # Disable the limit on the number of retries (retry forever).
      disable_retry_limit
      # Use multiple threads for processing.
      num_threads 2
    </match>
metadata:
  name: fluentd-es-config-v0.1.1
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
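monitoring.conf above wires up the fluent-plugin-prometheus input plugins without overriding their listen address, so the plugin's default of port 24231 with a /metrics path is an assumption worth verifying. A minimal sketch for scraping one fluentd pod once the DaemonSet below is running:

    POD=$(kubectl -n kube-system get pods -l k8s-app=fluentd-es -o jsonpath='{.items[0].metadata.name}')
    kubectl -n kube-system port-forward "$POD" 24231:24231 &
    curl -s http://localhost:24231/metrics | head   # assumes the plugin's default port 24231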
115
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
generated
vendored
Normal file
@@ -0,0 +1,115 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: kube-system
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "namespaces"
  - "pods"
  verbs:
  - "get"
  - "watch"
  - "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: fluentd-es
  namespace: kube-system
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  apiGroup: ""
---
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  name: fluentd-es-v2.0.2
  namespace: kube-system
  labels:
    k8s-app: fluentd-es
    version: v2.0.2
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
      version: v2.0.2
  template:
    metadata:
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
        version: v2.0.2
      # This annotation ensures that fluentd does not get evicted if the node
      # supports critical pod annotation based priority scheme.
      # Note that this does not guarantee admission on the nodes (#40573).
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: fluentd-es
      containers:
      - name: fluentd-es
        image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2
        env:
        - name: FLUENTD_ARGS
          value: --no-supervisor -q
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: libsystemddir
          mountPath: /host/lib
          readOnly: true
        - name: config-volume
          mountPath: /etc/fluent/config.d
      nodeSelector:
        beta.kubernetes.io/fluentd-ds-ready: "true"
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      # Needed to copy the systemd library so journals can be decompressed.
      - name: libsystemddir
        hostPath:
          path: /usr/lib64
      - name: config-volume
        configMap:
          name: fluentd-es-config-v0.1.1
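The DaemonSet above only schedules onto nodes carrying the beta.kubernetes.io/fluentd-ds-ready=true label (see its nodeSelector), so on clusters where nothing applies that label automatically it has to be set by hand; <node-name> below is a placeholder:

    kubectl label node <node-name> beta.kubernetes.io/fluentd-ds-ready=true
    kubectl -n kube-system get ds fluentd-es-v2.0.2   # DESIRED should now count the labeled nodes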
56
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,56 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This Dockerfile will build an image that is configured
# to run Fluentd with an Elasticsearch plug-in and the
# provided configuration file.
# The image acts as an executable for the binary /usr/sbin/td-agent.
# Note that fluentd is run with root permission to allow access to
# log files with root only access under /var/log/containers/*

FROM debian:stretch-slim

COPY clean-apt /usr/bin
COPY clean-install /usr/bin
COPY Gemfile /Gemfile

# 1. Install & configure dependencies.
# 2. Install fluentd via ruby.
# 3. Remove build dependencies.
# 4. Cleanup leftover caches & files.
RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev" \
    && clean-install $BUILD_DEPS \
                     ca-certificates \
                     libjemalloc1 \
                     ruby \
    && echo 'gem: --no-document' >> /etc/gemrc \
    && gem install --file Gemfile \
    && apt-get purge -y --auto-remove \
                     -o APT::AutoRemove::RecommendsImportant=false \
                     $BUILD_DEPS \
    && clean-apt \
    # Ensure fluent has enough file descriptors
    && ulimit -n 65536

# Copy the Fluentd configuration file for logging Docker container logs.
COPY fluent.conf /etc/fluent/fluent.conf
COPY run.sh /run.sh

# Expose prometheus metrics.
EXPOSE 80

ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

# Start Fluentd to pick up our config that watches Docker container logs.
CMD /run.sh $FLUENTD_ARGS
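Because the CMD passes $FLUENTD_ARGS straight through to run.sh, the image can be smoke-tested outside the cluster. A sketch, assuming the bundled run.sh forwards the flag unchanged (fluentd's --dry-run validates the configuration and exits):

    docker run --rm -e FLUENTD_ARGS="--dry-run" \
      gcr.io/google-containers/fluentd-elasticsearch:v2.0.2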
10
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile
generated
vendored
Normal file
@@ -0,0 +1,10 @@
source 'https://rubygems.org'

gem 'fluentd', '~>0.12.32'
gem 'activesupport', '~>4.2.6'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>0.27.0'
gem 'fluent-plugin-elasticsearch', '~>1.9.5'
gem 'fluent-plugin-systemd', '~>0.0.8'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'fluent-plugin-multi-format-parser', '~>0.1.1'
gem 'oj', '~>2.18.1'
25
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile
generated
vendored
Normal file
@@ -0,0 +1,25 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

.PHONY: build push

PREFIX = gcr.io/google-containers
IMAGE = fluentd-elasticsearch
TAG = v2.0.2

build:
	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .

push:
	gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG)
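Typical usage is to build locally and then push. Pushing to gcr.io/google-containers requires Google credentials, so for a private fork the PREFIX variable can be overridden on the command line (standard make behavior; <your-project> is a placeholder):

    make build
    make push PREFIX=gcr.io/<your-project>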
Some files were not shown because too many files have changed in this diff.