vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

View File

@ -0,0 +1,29 @@
This directory forms the base of the main SaltStack configuration. The
place to start with any SaltStack configuration is
[top.sls](top.sls). However, unless you are particularly keen on
reading Jinja templates, the following table breaks down which
configurations run on which providers. (NB: The [_states](_states/)
directory is a special directory included by Salt for `ensure` blocks,
and is only used for the [docker](docker/) config.)

Key: M = config applies to master, n = config applies to nodes
Config | GCE | Vagrant | AWS | Azure
----------------------------------------------------|-------|---------|-----|------
[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n | M n
[docker](docker/) | M n | M n | M n | M n
[etcd](etcd/) | M | M | M | M
[generate-cert](generate-cert/) | M | M | M | M
[kube-addons](kube-addons/) | M | M | M | M
[kube-apiserver](kube-apiserver/) | M | M | M | M
[kube-controller-manager](kube-controller-manager/) | M | M | M | M
[kube-proxy](kube-proxy/) | n | n | n | n
[kube-scheduler](kube-scheduler/) | M | M | M | M
[kubelet](kubelet/) | M n | M n | M n | M n
[logrotate](logrotate/) | M n | n | M n | M n
[supervisord](supervisor/) | M n | M n | M n | M n
[base](base.sls) | M n | M n | M n | M n
[kube-client-tools](kube-client-tools.sls) | M | M | M | M
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/saltbase/salt/README.md?pixel)]()
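For orientation, the sketch below shows how a Salt top file can map a node-role
grain (the states here match on roles such as `kubernetes-pool`) onto the state
IDs from the table above. It is only an illustrative sketch, not the actual
[top.sls](top.sls), which additionally branches on provider and pillar values
via Jinja; the role names shown are assumptions for the example.

```yaml
# Hypothetical top-file fragment; real targeting lives in top.sls and is
# driven by Jinja over grains such as 'roles' and 'cloud'.
base:
  'roles:kubernetes-master':        # assumed master role name
    - match: grain
    - base
    - debian-auto-upgrades
    - docker
    - etcd
    - kube-apiserver
    - kube-controller-manager
    - kube-scheduler
    - kube-addons
  'roles:kubernetes-pool':          # node role, as matched in the docker state
    - match: grain
    - base
    - debian-auto-upgrades
    - docker
    - kubelet
    - kube-proxy
```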

View File

@ -0,0 +1,60 @@
pkg-core:
pkg.installed:
- names:
- curl
- ebtables
{% if grains['os_family'] == 'RedHat' %}
- python
- git
- socat
{% else %}
- apt-transport-https
- python-apt
- nfs-common
- socat
{% endif %}
# Ubuntu installs netcat-openbsd by default, but on GCE/Debian netcat-traditional is installed.
# They behave slightly differently.
# For sanity, we try to make sure we have the same netcat on all OSes (#15166)
{% if grains['os'] == 'Ubuntu' %}
- netcat-traditional
{% endif %}
# Make sure git is installed for mounting git volumes
{% if grains['os'] == 'Ubuntu' %}
- git
{% endif %}
# Fix ARP cache issues on AWS by setting net.ipv4.neigh.default.gc_thresh1=0
# See issue #23395
{% if grains.get('cloud') == 'aws' %}
# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
# (we also have to give it a different id from the same fix elsewhere)
99-salt-conf-with-a-different-id:
file.touch:
- name: /etc/sysctl.d/99-salt.conf
net.ipv4.neigh.default.gc_thresh1:
sysctl.present:
- value: 0
{% endif %}
/usr/local/share/doc/kubernetes:
file.directory:
- user: root
- group: root
- mode: 755
- makedirs: True
/usr/local/share/doc/kubernetes/LICENSES:
file.managed:
- source: salt://kube-docs/LICENSES
- user: root
- group: root
- mode: 644
/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz:
file.managed:
- source: salt://kube-docs/kubernetes-src.tar.gz
- user: root
- group: root
- mode: 644

View File

@ -0,0 +1,6 @@
approvers:
- bowei
- dnardo
reviewers:
- bowei
- dnardo

View File

@ -0,0 +1,9 @@
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
ip6_tables:
kmod.present
xt_set:
kmod.present
{% endif -%}

View File

@ -0,0 +1,12 @@
reviewers:
- mwielgus
- jszczepkowski
- MaciekPytel
- aleksandra-malinowska
- bskiba
approvers:
- mwielgus
- jszczepkowski
- MaciekPytel
- aleksandra-malinowska
- bskiba

View File

@ -0,0 +1,106 @@
{% if pillar.get('enable_cluster_autoscaler', '').lower() == 'true' %}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config + " " + pillar.get('autoscaler_expander_config', '') -%}
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "cluster-autoscaler",
"namespace": "kube-system",
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
}
},
"spec": {
"hostNetwork": true,
"containers": [
{
"name": "cluster-autoscaler",
"image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
"port": 8085
},
"initialDelaySeconds": 600,
"periodSeconds": 60
},
"command": [
"./run.sh",
"--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
"--balance-similar-node-groups=true",
"{{params}}"
],
"env": [
{
"name": "LOG_OUTPUT",
"value": "/var/log/cluster-autoscaler.log"
}
],
# TODO: Make resource requirements depend on the size of the cluster
"resources": {
"requests": {
"cpu": "10m",
"memory": "300Mi"
}
},
"volumeMounts": [
{{cloud_config_mount}}
{
"name": "ssl-certs",
"readOnly": true,
"mountPath": "/etc/ssl/certs"
},
{
"name": "usrsharecacerts",
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "logfile",
"mountPath": "/var/log/cluster-autoscaler.log",
"readOnly": false
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent"
}
],
"volumes": [
{{cloud_config_volume}}
{
"name": "ssl-certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
},
{
"name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"
}
},
{
"name": "logfile",
"hostPath": {
"path": "/var/log/cluster-autoscaler.log",
"type": "FileOrCreate"
}
}
],
"restartPolicy": "Always"
}
}
{% endif %}

View File

@ -0,0 +1,25 @@
# Copy the autoscaler manifest to the manifests folder on the master.
# The ordering of the salt states for the docker service, kubelet and
# master-addon below is very important: it avoids a race between
# salt restarting docker or the kubelet and the kubelet starting the master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/cluster-autoscaler.manifest:
file.managed:
- source: salt://cluster-autoscaler/cluster-autoscaler.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- require:
- service: docker
- service: kubelet
/var/log/cluster-autoscaler.log:
file.managed:
- user: root
- group: root
- mode: 644

View File

@ -0,0 +1,8 @@
approvers:
- bowei
- dnardo
- freehan
reviewers:
- bowei
- dnardo
- freehan

View File

@ -0,0 +1,41 @@
/home/kubernetes:
file.directory:
- user: root
- group: root
- mode: 755
- makedirs: True
/etc/cni/net.d:
file.directory:
- user: root
- group: root
- mode: 755
- makedirs: True
# These are all available CNI network plugins.
cni-tar:
archive:
- extracted
- user: root
- name: /home/kubernetes/bin
- makedirs: True
- source: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz
- tar_options: v
- source_hash: md5=9534876FAE7DBE813CDAB404DC1F9219
- archive_format: tar
- if_missing: /home/kubernetes/bin
{% if grains['cloud'] is defined and grains.cloud in [ 'vagrant' ] %}
# Install local CNI network plugins in a Vagrant environment
cmd-local-cni-plugins:
cmd.run:
- name: |
cp -v /vagrant/cluster/network-plugins/cni/bin/* /home/kubernetes/bin/.
chmod +x /home/kubernetes/bin/*
cmd-local-cni-config:
cmd.run:
- name: |
cp -v /vagrant/cluster/network-plugins/cni/config/* /etc/cni/net.d/.
chown root:root /etc/cni/net.d/*
chmod 744 /etc/cni/net.d/*
{% endif -%}

View File

@ -0,0 +1,4 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::AutocleanInterval "7";

View File

@ -0,0 +1,13 @@
{% if grains['os_family'] == 'Debian' %}
unattended-upgrades:
pkg.installed
'/etc/apt/apt.conf.d/20auto-upgrades':
file.managed:
- source: salt://debian-auto-upgrades/20auto-upgrades
- user: root
- group: root
- mode: 644
- require:
- pkg: unattended-upgrades
{% endif %}

View File

@ -0,0 +1,8 @@
{% set docker_opts = "" -%}
{% if grains.docker_opts is defined and grains.docker_opts -%}
{% set docker_opts = grains.docker_opts -%}
{% endif -%}
DOCKER_OPTS='{{docker_opts}}'
OPTIONS='{{docker_opts}}'
DOCKER_CERT_PATH=/etc/docker

View File

@ -0,0 +1,18 @@
{% set grains_opts = grains.get('docker_opts', '') -%}
{% set e2e_opts = '' -%}
{% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' -%}
{% set e2e_opts = '-s devicemapper' -%}
{% endif -%}
{% set bridge_opts = "--bridge=cbr0" %}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set bridge_opts = "" %}
{% endif -%}
{% if pillar.get('network_provider', '').lower() == 'cni' %}
{% set bridge_opts = "" %}
{% endif -%}
{% set log_level = "--log-level=warn" -%}
{% if pillar['docker_test_log_level'] is defined -%}
{% set log_level = pillar['docker_test_log_level'] -%}
{% endif -%}
DOCKER_OPTS="{{grains_opts}} {{e2e_opts}} {{bridge_opts}} --iptables=false --ip-masq=false {{log_level}}"
DOCKER_NOFILE=1000000

View File

@ -0,0 +1,44 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to be run periodically, to check the health
# of docker. If it detects a failure, it will restart docker using systemctl.
if timeout 10 docker version > /dev/null; then
exit 0
fi
echo "docker failed"
echo "Giving docker 30 seconds grace before restarting"
sleep 30
if timeout 10 docker version > /dev/null; then
echo "docker recovered"
exit 0
fi
echo "docker still down; triggering docker restart"
systemctl restart docker
echo "Waiting 60 seconds to give docker time to start"
sleep 60
if timeout 10 docker version > /dev/null; then
echo "docker recovered"
exit 0
fi
echo "docker still failing"

View File

@ -0,0 +1,9 @@
[Unit]
Description=Run docker-healthcheck once
[Service]
Type=oneshot
ExecStart=/opt/kubernetes/helpers/docker-healthcheck
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,9 @@
[Unit]
Description=Trigger docker-healthcheck periodically
[Timer]
OnUnitInactiveSec=10s
Unit=docker-healthcheck.service
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to be run before we start Docker.
# cleanup docker network checkpoint to avoid running into known issue
# of docker (https://github.com/docker/docker/issues/18283)
rm -rf /var/lib/docker/network

View File

@ -0,0 +1 @@
deb https://apt.dockerproject.org/repo debian-{{ salt['grains.get']('oscodename') }} main

View File

@ -0,0 +1,21 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket
[Service]
Type=notify
EnvironmentFile={{ environment_file }}
ExecStart=/usr/bin/docker daemon -H fd:// "$DOCKER_OPTS"
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0
ExecStartPre=/opt/kubernetes/helpers/docker-prestart
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,535 @@
{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/docker' %}
{% else %}
{% set environment_file = '/etc/default/docker' %}
{% endif %}
bridge-utils:
pkg.installed
{% if grains.os_family == 'RedHat' %}
{{ environment_file }}:
file.managed:
- source: salt://docker/default
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
{% if grains.cloud is defined and grains.cloud == 'openstack' %}
cbr0:
# workaround https://github.com/saltstack/salt/issues/20570
kmod.present:
- name: bridge
network.managed:
- enabled: True
- type: bridge
- proto: none
- ports: none
- bridge: cbr0
- delay: 0
- bypassfirewall: True
- require_in:
- service: docker
- require:
- kmod: cbr0
{% endif %}
{% if (grains.os == 'Fedora' and grains.osrelease_info[0] >= 22) or (grains.os == 'CentOS' and grains.osrelease_info[0] >= 7) %}
docker:
pkg:
- installed
service.running:
- enable: True
- require:
- pkg: docker
- watch:
- file: {{ environment_file }}
- pkg: docker
{% else %}
docker-io:
pkg:
- installed
docker:
service.running:
- enable: True
- require:
- pkg: docker-io
- watch:
- file: {{ environment_file }}
- pkg: docker-io
{% endif %}
{% elif grains.cloud is defined and grains.cloud == 'azure-legacy' %}
{% if pillar.get('is_systemd') %}
{{ pillar.get('systemd_system_path') }}/docker.service:
file.managed:
- source: salt://docker/docker.service
- template: jinja
- user: root
- group: root
- mode: 644
- defaults:
environment_file: {{ environment_file }}
# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce docker
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker.service
- file: {{ environment_file }}
{% endif %}
{{ environment_file }}:
file.managed:
- source: salt://docker/docker-defaults
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- require:
- pkg: docker-engine
apt-key:
pkgrepo.managed:
- humanname: Dotdeb
- name: deb https://apt.dockerproject.org/repo ubuntu-trusty main
- dist: ubuntu-trusty
- file: /etc/apt/sources.list.d/docker.list
- keyid: 58118E89F3A912897C070ADBF76221572C52609D
- keyserver: hkp://p80.pool.sks-keyservers.net:80
lxc-docker:
pkg:
- purged
docker-io:
pkg:
- purged
cbr0:
network.managed:
- enabled: True
- type: bridge
{% if grains['roles'][0] == 'kubernetes-pool' %}
- proto: none
{% else %}
- proto: dhcp
{% endif %}
- ports: none
- bridge: cbr0
{% if grains['roles'][0] == 'kubernetes-pool' %}
- ipaddr: {{ grains['cbr-cidr'] }}
{% endif %}
- delay: 0
- bypassfirewall: True
- require_in:
- service: docker
docker-engine:
pkg:
- installed
- require:
- pkgrepo: 'apt-key'
docker:
service.running:
- enable: True
- require:
- file: {{ environment_file }}
- watch:
- file: {{ environment_file }}
{% elif grains.cloud is defined and grains.cloud in ['photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}
{% if pillar.get('is_systemd') %}
/opt/kubernetes/helpers/docker-prestart:
file.managed:
- source: salt://docker/docker-prestart
- user: root
- group: root
- mode: 755
{{ pillar.get('systemd_system_path') }}/docker.service:
file.managed:
- source: salt://docker/docker.service
- template: jinja
- user: root
- group: root
- mode: 644
- defaults:
environment_file: {{ environment_file }}
- require:
- file: /opt/kubernetes/helpers/docker-prestart
- pkg: docker-engine
# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce docker
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker.service
- file: {{ environment_file }}
{% endif %}
{{ environment_file }}:
file.managed:
- source: salt://docker/docker-defaults
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- require:
- pkg: docker-engine
apt-key:
cmd.run:
- name: 'apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D'
- unless: 'apt-key finger | grep "5811 8E89"'
apt-update:
cmd.run:
- name: '/usr/bin/apt-get update -y'
- require:
- cmd : 'apt-key'
lxc-docker:
pkg:
- purged
docker-io:
pkg:
- purged
cbr0:
network.managed:
- enabled: True
- type: bridge
- proto: dhcp
- ports: none
- bridge: cbr0
- delay: 0
- bypassfirewall: True
- require_in:
- service: docker
/etc/apt/sources.list.d/docker.list:
file.managed:
- source: salt://docker/docker.list
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- cmd: 'apt-update'
# Restrict the docker version to 1.9: with older versions of docker we hit
# issue https://github.com/docker/docker/issues/18793,
# and the newer docker 1.10.0 is not well tested yet.
# Full discussion: https://github.com/kubernetes/kubernetes/pull/20851
docker-engine:
pkg:
- installed
- version: 1.9.*
- require:
- file: /etc/apt/sources.list.d/docker.list
docker:
service.running:
- enable: True
- require:
- file: {{ environment_file }}
- watch:
- file: {{ environment_file }}
{% else %}
{% if grains.cloud is defined
and grains.cloud == 'gce' %}
# The default GCE images have ip_forwarding explicitly set to 0.
# Here we take care of commenting that out.
/etc/sysctl.d/11-gce-network-security.conf:
file.replace:
- pattern: '^net.ipv4.ip_forward=0'
- repl: '# net.ipv4.ip_forward=0'
{% endif %}
# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
/etc/sysctl.d/99-salt.conf:
file.touch
# TODO: This should really be based on network strategy instead of os_family
net.ipv4.ip_forward:
sysctl.present:
- value: 1
{% if pillar.get('softlockup_panic', '').lower() == 'true' %}
# TODO(dchen1107) Remove this once kernel.softlockup_panic is built into the CVM image.
/etc/sysctl.conf:
file.append:
- text:
- "kernel.softlockup_panic = 1"
- "kernel.softlockup_all_cpu_backtrace = 1"
'sysctl-reload':
cmd.run:
- name: 'sysctl --system'
- unless: 'sysctl -a | grep "kernel.softlockup_panic = 1"'
{% endif %}
{{ environment_file }}:
file.managed:
- source: salt://docker/docker-defaults
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
# Docker is on the ContainerVM image by default. The following
# variables are provided for other cloud providers, and for testing and dire circumstances, to allow
# overriding the Docker version that's in a ContainerVM image.
#
# To change:
#
# 1. Find new deb name at:
# http://apt.dockerproject.org/repo/pool/main/d/docker-engine
# 2. Download based on that:
# curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/<deb>
# 3. Upload to GCS:
# gsutil cp <deb> gs://kubernetes-release/docker/<deb>
# 4. Make it world readable:
# gsutil acl ch -R -g all:R gs://kubernetes-release/docker/<deb>
# 5. Get a hash of the deb:
# shasum <deb>
# 6. Update override_deb, override_deb_sha1, override_docker_ver with new
# deb name, new hash and new version
{% set storage_base='https://storage.googleapis.com/kubernetes-release/docker/' %}
{% set override_deb_url='' %}
{% if grains.get('cloud', '') == 'gce'
and grains.get('os_family', '') == 'Debian'
and grains.get('oscodename', '') == 'wheezy' -%}
{% set docker_pkg_name='' %}
{% set override_deb='' %}
{% set override_deb_sha1='' %}
{% set override_docker_ver='' %}
{% elif grains.get('cloud', '') == 'gce'
and grains.get('os_family', '') == 'Debian'
and grains.get('oscodename', '') == 'jessie' -%}
{% set docker_pkg_name='' %}
{% set override_deb='' %}
{% set override_deb_sha1='' %}
{% set override_docker_ver='' %}
{% elif grains.get('cloud', '') == 'aws'
and grains.get('os_family', '') == 'Debian'
and grains.get('oscodename', '') == 'jessie' -%}
# TODO: Get from google storage?
{% set docker_pkg_name='docker-engine' %}
{% set override_docker_ver='1.11.2-0~jessie' %}
{% set override_deb='docker-engine_1.11.2-0~jessie_amd64.deb' %}
{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~jessie_amd64.deb' %}
{% set override_deb_sha1='c312f1f6fa0b34df4589bb812e4f7af8e28fd51d' %}
# Ubuntu presents as os_family=Debian, osfullname=Ubuntu
{% elif grains.get('cloud', '') == 'aws'
and grains.get('os_family', '') == 'Debian'
and grains.get('oscodename', '') == 'trusty' -%}
# TODO: Get from google storage?
{% set docker_pkg_name='docker-engine' %}
{% set override_docker_ver='1.11.2-0~trusty' %}
{% set override_deb='docker-engine_1.11.2-0~trusty_amd64.deb' %}
{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~trusty_amd64.deb' %}
{% set override_deb_sha1='022dee31e68c6d572eaac750915786e4a6729d2a' %}
{% elif grains.get('cloud', '') == 'aws'
and grains.get('os_family', '') == 'Debian'
and grains.get('oscodename', '') == 'wily' -%}
# TODO: Get from google storage?
{% set docker_pkg_name='docker-engine' %}
{% set override_docker_ver='1.11.2-0~wily' %}
{% set override_deb='docker-engine_1.11.2-0~wily_amd64.deb' %}
{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~wily_amd64.deb' %}
{% set override_deb_sha1='3e02f51fe18aa777eeb1676c3d9a75e5ea6d96c9' %}
{% else %}
{% set docker_pkg_name='lxc-docker-1.7.1' %}
{% set override_docker_ver='1.7.1' %}
{% set override_deb='lxc-docker-1.7.1_1.7.1_amd64.deb' %}
{% set override_deb_sha1='81abef31dd2c616883a61f85bfb294d743b1c889' %}
{% endif %}
{% if override_deb_url == '' %}
{% set override_deb_url=storage_base + override_deb %}
{% endif %}
{% if override_docker_ver != '' %}
purge-old-docker-package:
pkg.removed:
- pkgs:
- lxc-docker-1.6.2
/var/cache/docker-install/{{ override_deb }}:
file.managed:
- source: {{ override_deb_url }}
- source_hash: sha1={{ override_deb_sha1 }}
- user: root
- group: root
- mode: 644
- makedirs: true
# Drop the license file into /usr/share so that everything is crystal clear.
/usr/share/doc/docker/apache.txt:
file.managed:
- source: {{ storage_base }}apache2.txt
- source_hash: sha1=2b8b815229aa8a61e483fb4ba0588b8b6c491890
- user: root
- group: root
- mode: 644
- makedirs: true
libltdl7:
pkg.installed
docker-upgrade:
cmd.run:
- name: /opt/kubernetes/helpers/pkg install-no-start {{ docker_pkg_name }} {{ override_docker_ver }} /var/cache/docker-install/{{ override_deb }}
- require:
- file: /var/cache/docker-install/{{ override_deb }}
- pkg: libltdl7
{% endif %} # end override_docker_ver != ''
{% if pillar.get('is_systemd') %}
/opt/kubernetes/helpers/docker-prestart:
file.managed:
- source: salt://docker/docker-prestart
- user: root
- group: root
- mode: 755
# Default docker systemd unit file doesn't use an EnvironmentFile; replace it with one that does.
{{ pillar.get('systemd_system_path') }}/docker.service:
file.managed:
- source: salt://docker/docker.service
- template: jinja
- user: root
- group: root
- mode: 644
- defaults:
environment_file: {{ environment_file }}
- require:
- file: /opt/kubernetes/helpers/docker-prestart
# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
cmd.wait:
- name: /opt/kubernetes/helpers/services enable docker
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker.service
- file: {{ environment_file }}
{% if override_docker_ver != '' %}
- require:
- cmd: docker-upgrade
{% endif %}
/opt/kubernetes/helpers/docker-healthcheck:
file.managed:
- source: salt://docker/docker-healthcheck
- user: root
- group: root
- mode: 755
{{ pillar.get('systemd_system_path') }}/docker-healthcheck.service:
file.managed:
- source: salt://docker/docker-healthcheck.service
- template: jinja
- user: root
- group: root
- mode: 644
{{ pillar.get('systemd_system_path') }}/docker-healthcheck.timer:
file.managed:
- source: salt://docker/docker-healthcheck.timer
- template: jinja
- user: root
- group: root
- mode: 644
# Tell systemd to load the timer
fix-systemd-docker-healthcheck-timer:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce docker-healthcheck.timer
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker-healthcheck.timer
# Trigger a first run of docker-healthcheck; needed because the timer fires 10s after the previous run.
fix-systemd-docker-healthcheck-service:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce docker-healthcheck.service
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker-healthcheck.service
- require:
- cmd: fix-service-docker
{% endif %}
docker:
# Starting Docker is racy on AWS for some reason. To be honest, since Monit
# is managing Docker restarts we should probably just delete this whole thing,
# but the kubernetes components use salt 'require' to set up a DAG, and that
# is complicated and scary to unwind.
# On AWS, we use a trick now... We don't start the docker service through Salt.
# Kubelet or our health checker will start it. But we use service.enabled,
# so we still have a `service: docker` node for our DAG.
{% if grains.cloud is defined and grains.cloud == 'aws' %}
service.enabled:
{% else %}
service.running:
- enable: True
{% endif %}
# If we put a watch on this, salt will try to start the service.
# We put the watch on the fixer instead
{% if not pillar.get('is_systemd') %}
- watch:
- file: {{ environment_file }}
{% if override_docker_ver != '' %}
- cmd: docker-upgrade
{% endif %}
{% endif %}
- require:
- file: {{ environment_file }}
{% if override_docker_ver != '' %}
- cmd: docker-upgrade
{% endif %}
{% if pillar.get('is_systemd') %}
- cmd: fix-service-docker
{% endif %}
{% endif %} # end grains.os_family != 'RedHat'

View File

@ -0,0 +1,111 @@
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
name: e2e-image-puller
namespace: kube-system
labels:
name: e2e-image-puller
spec:
containers:
- name: image-puller
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: gcr.io/google_containers/busybox:1.24
# TODO: Replace this with a go script that pulls in parallel?
# Currently it takes ~5m to pull all e2e images, so this is OK, and
# fewer moving parts is always better.
# TODO: Replace the hardcoded image list with an autogen list; the list is
# currently hard-coded for static verification. It was generated via:
# grep -Iiroh "gcr.io/google_.*" "${KUBE_ROOT}/test/e2e" | \
# sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
# We always want the subshell to exit 0 so this pod doesn't end up
# blocking tests in an Error state.
command:
- /bin/sh
- -c
- >
for i in
gcr.io/google_containers/alpine-with-bash:1.0
gcr.io/google_containers/apparmor-loader:0.1
gcr.io/google_containers/busybox:1.24
gcr.io/google_containers/dnsutils:e2e
gcr.io/google_containers/e2e-net-amd64:1.0
gcr.io/google_containers/echoserver:1.6
gcr.io/google_containers/eptest:0.1
gcr.io/google_containers/fakegitserver:0.1
gcr.io/google_containers/galera-install:0.1
gcr.io/google_containers/hostexec:1.2
gcr.io/google_containers/invalid-image:invalid-tag
gcr.io/google_containers/iperf:e2e
gcr.io/google_containers/jessie-dnsutils:e2e
gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5
gcr.io/google_containers/liveness:e2e
gcr.io/google_containers/logs-generator:v0.1.0
gcr.io/google_containers/mounttest:0.8
gcr.io/google_containers/mounttest-user:0.5
gcr.io/google_containers/mysql-galera:e2e
gcr.io/google_containers/mysql-healthz:1.0
gcr.io/google_containers/netexec:1.4
gcr.io/google_containers/netexec:1.5
gcr.io/google_containers/netexec:1.7
gcr.io/google_containers/nettest:1.7
gcr.io/google_containers/nginx:1.7.9
gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1
gcr.io/google_containers/nginx-slim:0.7
gcr.io/google_containers/nginx-slim:0.8
gcr.io/google_containers/node-problem-detector:v0.3.0
gcr.io/google_containers/pause
gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
gcr.io/google_containers/portforwardtester:1.2
gcr.io/google_containers/redis-install-3.2.0:e2e
gcr.io/google_containers/resource_consumer:beta4
gcr.io/google_containers/resource_consumer/controller:beta4
gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
gcr.io/google_containers/servicelb:0.1
gcr.io/google_containers/test-webserver:e2e
gcr.io/google_containers/update-demo:kitten
gcr.io/google_containers/update-demo:nautilus
gcr.io/google_containers/volume-ceph:0.1
gcr.io/google_containers/volume-gluster:0.2
gcr.io/google_containers/volume-iscsi:0.1
gcr.io/google_containers/volume-nfs:0.8
gcr.io/google_containers/volume-rbd:0.1
gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run/docker.sock
name: socket
- mountPath: /usr/bin/docker
name: docker
# Add a container that runs a health-check
- name: nethealth-check
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: gcr.io/google_containers/kube-nethealth-amd64:1.0
command:
- /bin/sh
- -c
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: /var/run/docker.sock
type: Socket
name: socket
- hostPath:
path: /usr/bin/docker
type: File
name: docker
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
hostNetwork: true

View File

@ -0,0 +1,12 @@
/etc/kubernetes/manifests/e2e-image-puller.manifest:
file.managed:
- source: salt://e2e-image-puller/e2e-image-puller.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- require:
- service: docker
- service: kubelet

View File

@ -0,0 +1,28 @@
e2e:
# Install various packages required by e2e tests to all hosts.
pkg.installed:
- refresh: true
- pkgs:
- targetcli
- ceph
{% if grains['os_family'] == 'RedHat' %}
- glusterfs-fuse
- rbd-fuse
- iscsi-initiator-utils
- nfs-utils
{% else %}
- glusterfs-client
- open-iscsi
- iscsitarget-dkms
- nfs-common
{% endif %}
{% if grains['os_family'] == 'Debian' %}
# On Debian, re-start open-iscsi to generate unique
# /etc/iscsi/initiatorname.iscsi
open-iscsi:
cmd.run:
- name: 'service open-iscsi restart'
{% endif %}

View File

@ -0,0 +1,115 @@
{% set etcd_protocol = 'http' -%}
{% set etcd_creds = '' -%}
{% if pillar.get('etcd_over_ssl', '').lower() == 'true' -%}
{% set etcd_protocol = 'https' -%}
{% set etcd_creds = '--peer-trusted-ca-file /srv/kubernetes/etcd-ca.crt --peer-cert-file /srv/kubernetes/etcd-peer.crt --peer-key-file /srv/kubernetes/etcd-peer.key -peer-client-cert-auth' -%}
{% endif -%}
{% set hostname = pillar.get('hostname', '') -%}
{% set cluster_state = (pillar.get('initial_etcd_cluster_state') or 'new') -%}
{% set etcd_cluster_array = (pillar.get('initial_etcd_cluster') or hostname).split(',') -%}
{% set etcd_cluster = '' -%}
{# We use a vars dictionary to pass out values set inside the for loop, because Jinja creates new loop-local variables inside the for loop that shadow the variables defined outside it. #}
{% set vars = {'etcd_cluster': ''} -%}
{% for host in etcd_cluster_array -%}
{% if etcd_cluster != '' -%}
{% set etcd_cluster = etcd_cluster ~ ',' -%}
{% endif -%}
{% set etcd_cluster = etcd_cluster ~ 'etcd-' ~ host ~ '=' ~ etcd_protocol ~'://' ~ host ~ ':' ~ server_port -%}
{% do vars.update({'etcd_cluster': etcd_cluster}) -%}
{% endfor -%}
{% set etcd_cluster = vars.etcd_cluster -%}
{% set quota_bytes = '' -%}
{% if pillar.get('storage_backend', 'etcd3') == 'etcd3' -%}
{% set quota_bytes = '--quota-backend-bytes=4294967296' -%}
{% endif -%}
{% set liveness_probe_initial_delay = pillar.get('etcd_liveness_probe_initial_delay', 15) -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server{{ suffix }}",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'gcr.io/google_containers/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.1.10') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
}
},
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.1.10') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
}
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": {{ port }},
"path": "/health"
},
"initialDelaySeconds": {{ liveness_probe_initial_delay }},
"timeoutSeconds": 15
},
"ports": [
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
},
{ "name": "clientport",
"containerPort": {{ port }},
"hostPort": {{ port }}
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{ "name": "varlogetcd",
"mountPath": "/var/log/etcd{{ suffix }}.log",
"readOnly": false
},
{ "name": "etc",
"mountPath": "{{ srv_kube_path }}",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd{{ suffix }}.log",
"type": "FileOrCreate"}
},
{ "name": "etc",
"hostPath": {
"path": "{{ srv_kube_path }}"}
}
]
}}

View File

@ -0,0 +1,83 @@
# Early configurations of Kubernetes ran etcd on the host, and as part of a migration step we began to delete the host etcd.
# It's possible though that the host has configured a separate etcd to support other services like Flannel.
# In that case, we do not want Salt to remove or stop the host service.
# Note: it's imperative that the host-installed etcd not conflict with the Kubernetes-managed etcd.
{% if grains['keep_host_etcd'] is not defined %}
delete_etc_etcd_dir:
file.absent:
- name: /etc/etcd
delete_etcd_conf:
file.absent:
- name: /etc/etcd/etcd.conf
delete_etcd_default:
file.absent:
- name: /etc/default/etcd
{% if pillar.get('is_systemd') %}
delete_etcd_service_file:
file.absent:
- name: {{ pillar.get('systemd_system_path') }}/etcd.service
{% endif %}
delete_etcd_initd:
file.absent:
- name: /etc/init.d/etcd
#stop legacy etcd_service
stop_etcd-service:
service.dead:
- name: etcd
- enable: None
{% endif %}
touch /var/log/etcd.log:
cmd.run:
- creates: /var/log/etcd.log
touch /var/log/etcd-events.log:
cmd.run:
- creates: /var/log/etcd-events.log
/var/etcd:
file.directory:
- user: root
- group: root
- dir_mode: 700
- recurse:
- user
- group
- mode
/etc/kubernetes/manifests/etcd.manifest:
file.managed:
- source: salt://etcd/etcd.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- context:
suffix: ""
port: 2379
server_port: 2380
cpulimit: '"200m"'
/etc/kubernetes/manifests/etcd-events.manifest:
file.managed:
- source: salt://etcd/etcd.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- context:
suffix: "-events"
port: 4002
server_port: 2381
cpulimit: '"100m"'

View File

@ -0,0 +1,48 @@
{% set master_extra_sans=grains.get('master_extra_sans', '') %}
{% if grains.cloud is defined %}
{% if grains.cloud == 'gce' %}
{% set cert_ip='_use_gce_external_ip_' %}
{% endif %}
{% if grains.cloud == 'aws' %}
{% set cert_ip='_use_aws_external_ip_' %}
{% endif %}
{% if grains.cloud == 'azure-legacy' %}
{% set cert_ip='_use_azure_dns_name_' %}
{% endif %}
{% if grains.cloud == 'photon-controller' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %}
{% endif %}
{% endif %}
# If there is a pillar defined, override any defaults.
{% if pillar['cert_ip'] is defined %}
{% set cert_ip=pillar['cert_ip'] %}
{% endif %}
{% set certgen="make-cert.sh" %}
{% if cert_ip is defined %}
{% set certgen="make-ca-cert.sh" %}
{% endif %}
openssl:
pkg.installed: []
kube-cert:
group.present:
- system: True
kubernetes-cert:
cmd.script:
- unless: test -f /srv/kubernetes/server.cert
- source: salt://generate-cert/{{certgen}}
{% if cert_ip is defined %}
- args: {{cert_ip}} {{master_extra_sans}}
- require:
- pkg: curl
{% endif %}
- cwd: /
- user: root
- group: root
- shell: /bin/bash
- require:
- pkg: openssl

View File

@ -0,0 +1,112 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
DEBUG="${DEBUG:-false}"
if [ "${DEBUG}" == "true" ]; then
set -x
fi
cert_ip=$1
extra_sans=${2:-}
cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}
mkdir -p "$cert_dir"
use_cn=false
# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
# If there's no public IP assigned (e.g. this host is running on an internal subnet in a VPC), then
# curl will happily spit out the contents of AWS's 404 page and an exit code of zero.
#
# The string containing the 404 page trips up one of easyrsa's calls to openssl later (the one
# that creates the CA certificate), because the 404 page is > 64 characters.
if cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4); then
:
else
cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
use_cn=true
fi
sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
sans="${sans},${extra_sans}"
fi
tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
# TODO: For now, this is a patched tool that makes subject-alt-name work, when
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.
#
# Use ~/kube/easy-rsa.tar.gz if it exists, so that it can be
# pre-pushed in cases where an outgoing connection is not allowed.
if [ -f ~/kube/easy-rsa.tar.gz ]; then
ln -s ~/kube/easy-rsa.tar.gz .
else
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
fi
tar xzf easy-rsa.tar.gz > /dev/null 2>&1
cd easy-rsa-master/easyrsa3
./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=$cert_ip@`date +%s`" build-ca nopass > /dev/null 2>&1
if [ $use_cn = "true" ]; then
./easyrsa build-server-full $cert_ip nopass > /dev/null 2>&1
cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1
cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1
else
./easyrsa --subject-alt-name="${sans}" build-server-full kubernetes-master nopass > /dev/null 2>&1
cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1
cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1
fi
# Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
./easyrsa --dn-mode=org \
--req-cn=kubecfg --req-org=system:masters \
--req-c= --req-st= --req-city= --req-email= --req-ou= \
build-client-full kubecfg nopass > /dev/null 2>&1
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
# Make server certs accessible to apiserver.
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"

View File

@ -0,0 +1,26 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}
mkdir -p "$cert_dir"
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-subj "/CN=kubernetes.invalid/O=Kubernetes" \
-keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert"
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert"

View File

@ -0,0 +1,14 @@
{% if grains['cloud'] is defined and grains['cloud'] == 'aws' %}
/usr/share/google:
file.directory:
- user: root
- group: root
- dir_mode: 755
/usr/share/google/safe_format_and_mount:
file.managed:
- source: salt://helpers/safe_format_and_mount
- user: root
- group: root
- mode: 755
{% endif %}

View File

@ -0,0 +1,144 @@
#! /bin/bash
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Mount a disk, formatting it if necessary. If the disk looks like it may
# have been formatted before, we will not format it.
#
# This script uses blkid and file to search for magic "formatted" bytes
# at the beginning of the disk. Furthermore, it attempts to use fsck to
# repair the filesystem before formatting it.
FSCK=fsck.ext4
MOUNT_OPTIONS="discard,defaults"
MKFS="mkfs.ext4 -F"
if [ -e /etc/redhat-release ]; then
if grep -q '7\..' /etc/redhat-release; then
FSCK=fsck.xfs
MKFS=mkfs.xfs
fi
fi
LOGTAG=safe_format_and_mount
LOGFACILITY=user
function log() {
local readonly severity=$1; shift;
logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@"
}
function log_command() {
local readonly log_file=$(mktemp)
local readonly retcode
log info "Running: $*"
$* > ${log_file} 2>&1
retcode=$?
# only return the last 1000 lines of the logfile, just in case it's HUGE.
tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s
rm -f ${log_file}
return ${retcode}
}
function help() {
cat >&2 <<EOF
$0 [-f fsck_cmd] [-m mkfs_cmd] [-o mount_opts] <device> <mountpoint>
EOF
exit 0
}
while getopts ":hf:o:m:" opt; do
case $opt in
h) help;;
f) FSCK=$OPTARG;;
o) MOUNT_OPTIONS=$OPTARG;;
m) MKFS=$OPTARG;;
-) break;;
\?) log error "Invalid option: -${OPTARG}"; exit 1;;
:) log "Option -${OPTARG} requires an argument."; exit 1;;
esac
done
shift $(($OPTIND - 1))
readonly DISK=$1
readonly MOUNTPOINT=$2
[[ -z ${DISK} ]] && help
[[ -z ${MOUNTPOINT} ]] && help
function disk_looks_unformatted() {
blkid ${DISK}
if [[ $? == 0 ]]; then
return 0
fi
local readonly file_type=$(file --special-files ${DISK})
case ${file_type} in
*filesystem*)
return 0;;
esac
return 1
}
function format_disk() {
log_command ${MKFS} ${DISK}
}
function try_repair_disk() {
log_command ${FSCK} -a ${DISK}
local readonly fsck_return=$?
if [[ ${fsck_return} -ge 8 ]]; then
log error "Fsck could not correct errors on ${DISK}"
return 1
fi
if [[ ${fsck_return} -gt 0 ]]; then
log warning "Fsck corrected errors on ${DISK}"
fi
return 0
}
function try_mount() {
local mount_retcode
try_repair_disk
log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
mount_retcode=$?
if [[ ${mount_retcode} == 0 ]]; then
return 0
fi
# Check to see if it looks like a filesystem before formatting it.
disk_looks_unformatted ${DISK}
if [[ $? == 0 ]]; then
log error "Disk ${DISK} looks formatted but won't mount. Giving up."
return ${mount_retcode}
fi
# The disk looks like it's not been formatted before.
format_disk
if [[ $? != 0 ]]; then
log error "Format of ${DISK} failed."
fi
log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
mount_retcode=$?
if [[ ${mount_retcode} == 0 ]]; then
return 0
fi
log error "Tried everything we could, but could not mount ${DISK}."
return ${mount_retcode}
}
try_mount
exit $?

View File

@ -0,0 +1,215 @@
addon-dir-delete:
file.absent:
- name: /etc/kubernetes/addons
addon-dir-create:
file.directory:
- name: /etc/kubernetes/addons
- user: root
- group: root
- mode: 0755
- require:
- file: addon-dir-delete
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'influxdb' %}
/etc/kubernetes/addons/cluster-monitoring/influxdb:
file.recurse:
- source: salt://kube-addons/cluster-monitoring/influxdb
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- template: jinja
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_l7_loadbalancing', '').lower() == 'glbc' %}
/etc/kubernetes/addons/cluster-loadbalancing/glbc:
file.recurse:
- source: salt://kube-addons/cluster-loadbalancing/glbc
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- template: jinja
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'google' %}
/etc/kubernetes/addons/cluster-monitoring/google:
file.recurse:
- source: salt://kube-addons/cluster-monitoring/google
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- template: jinja
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'stackdriver' %}
/etc/kubernetes/addons/cluster-monitoring/stackdriver:
file.recurse:
- source: salt://kube-addons/cluster-monitoring/stackdriver
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- template: jinja
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'standalone' %}
/etc/kubernetes/addons/cluster-monitoring/standalone:
file.recurse:
- source: salt://kube-addons/cluster-monitoring/standalone
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- template: jinja
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'googleinfluxdb' %}
/etc/kubernetes/addons/cluster-monitoring/googleinfluxdb:
file.recurse:
- source: salt://kube-addons/cluster-monitoring
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- exclude_pat: E@(^.+heapster-controller\.yaml$|^.+heapster-controller\.json$)
- template: jinja
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
/etc/kubernetes/addons/dns/kube-dns.yaml:
file.managed:
- source: salt://kube-addons/dns/kube-dns.yaml.in
- template: jinja
- group: root
- dir_mode: 755
- makedirs: True
{% endif %}
{% if pillar.get('enable_dns_horizontal_autoscaler', '').lower() == 'true'
and pillar.get('enable_cluster_dns', '').lower() == 'true' %}
/etc/kubernetes/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml:
file.managed:
- source: salt://kube-addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
- user: root
- group: root
- file_mode: 644
- makedirs: True
{% endif %}
{% if pillar.get('enable_cluster_registry', '').lower() == 'true' %}
/etc/kubernetes/addons/registry/registry-svc.yaml:
file.managed:
- source: salt://kube-addons/registry/registry-svc.yaml
- user: root
- group: root
- file_mode: 644
- makedirs: True
/etc/kubernetes/addons/registry/registry-rc.yaml:
file.managed:
- source: salt://kube-addons/registry/registry-rc.yaml
- user: root
- group: root
- file_mode: 644
- makedirs: True
/etc/kubernetes/addons/registry/registry-pv.yaml:
file.managed:
- source: salt://kube-addons/registry/registry-pv.yaml.in
- template: jinja
- user: root
- group: root
- file_mode: 644
- makedirs: True
/etc/kubernetes/addons/registry/registry-pvc.yaml:
file.managed:
- source: salt://kube-addons/registry/registry-pvc.yaml.in
- template: jinja
- user: root
- group: root
- file_mode: 644
- makedirs: True
{% endif %}
{% if pillar.get('enable_node_logging', '').lower() == 'true'
and 'logging_destination' in pillar
and pillar.get('enable_cluster_logging', '').lower() == 'true' %}
/etc/kubernetes/addons/fluentd-{{ pillar.get('logging_destination') }}:
file.recurse:
- source: salt://kube-addons/fluentd-{{ pillar.get('logging_destination') }}
- include_pat: E@^.+\.yaml$
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_metadata_proxy', '').lower() == 'true' %}
/etc/kubernetes/addons/metadata-proxy/gce:
file.recurse:
- source: salt://kube-addons/metadata-proxy/gce
- include_pat: E@^.+\.yaml$
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_pod_security_policy', '').lower() == 'true' %}
/etc/kubernetes/addons/podsecuritypolicies:
file.recurse:
- source: salt://kube-addons/podsecuritypolicies
- include_pat: E@^.+\.yaml$
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_cluster_ui', '').lower() == 'true' %}
/etc/kubernetes/addons/dashboard:
file.recurse:
- source: salt://kube-addons/dashboard
- include_pat: E@^.+\.yaml$
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}
{% if pillar.get('enable_node_problem_detector', '').lower() == 'daemonset' %}
/etc/kubernetes/addons/node-problem-detector/npd.yaml:
file.managed:
- source: salt://kube-addons/node-problem-detector/npd.yaml
- user: root
- group: root
- file_mode: 644
- makedirs: True
{% endif %}
/etc/kubernetes/manifests/kube-addon-manager.yaml:
file.managed:
- source: salt://kube-addons/kube-addon-manager.yaml
- user: root
- group: root
- mode: 755
{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] in ['aws', 'gce', 'openstack'] %}
/etc/kubernetes/addons/storage-class/default.yaml:
file.managed:
- source: salt://kube-addons/storage-class/{{ grains['cloud'] }}/default.yaml
- user: root
- group: root
- mode: 644
- makedirs: True
{% endif %}

View File

@ -0,0 +1,38 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
# When updating version also bump it in:
# - test/kubemark/resources/manifests/kube-addon-manager.yaml
image: gcr.io/google-containers/kube-addon-manager:v6.5
command:
- /bin/bash
- -c
- /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
volumes:
- hostPath:
path: /etc/kubernetes/
name: addons
- hostPath:
path: /var/log
name: varlog

View File

@ -0,0 +1,10 @@
{% if 'LimitRanger' in pillar.get('admission_control', '') %}
/etc/kubernetes/admission-controls/limit-range:
file.recurse:
- source: salt://kube-admission-controls/limit-range
- include_pat: E@(^.+\.yaml$|^.+\.json$)
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
{% endif %}

View File

@ -0,0 +1,10 @@
apiVersion: "v1"
kind: "LimitRange"
metadata:
name: "limits"
namespace: default
spec:
limits:
- type: "Container"
defaultRequest:
cpu: "100m"

View File

@ -0,0 +1,8 @@
{% set kube_user = grains.kube_user -%}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"{{kube_user}}", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kube_proxy", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubecfg", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"client", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}

View File

@ -0,0 +1,71 @@
{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
# TODO: generate and distribute tokens on other cloud providers.
/srv/kubernetes/known_tokens.csv:
file.managed:
- source: salt://kube-apiserver/known_tokens.csv
- user: root
- group: root
- mode: 600
# - watch_in:
# - service: kube-apiserver
/srv/kubernetes/basic_auth.csv:
file.managed:
- source: salt://kube-apiserver/basic_auth.csv
- user: root
- group: root
- mode: 600
/srv/kubernetes/abac-authz-policy.jsonl:
file.managed:
- source: salt://kube-apiserver/abac-authz-policy.jsonl
- template: jinja
- user: root
- group: root
- mode: 600
{% endif %}
/var/log/kube-apiserver.log:
file.managed:
- user: root
- group: root
- mode: 644
/var/log/kube-apiserver-audit.log:
file.managed:
- user: root
- group: root
- mode: 644
# Copy the kube-apiserver manifest to the manifests folder for the kubelet.
# The current containervm image has both docker and kubelet running by
# default. But during the cluster creation stage, docker and kubelet
# could be overwritten completely, or restarted due to flag changes.
# The ordering of the salt states for the docker service, kubelet and
# master-addon below is very important: it avoids a race between
# salt restarting docker or the kubelet and the kubelet starting the master components.
# Without this ordering, when a gce instance boots up,
# configure-vm.sh will run and download the release. At the end of
# boot, run-salt would install the kube-apiserver.manifest files into the
# kubelet config directory before the proper kubelet version is
# installed. Please see
# http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/kube-apiserver.manifest:
file.managed:
- source: salt://kube-apiserver/kube-apiserver.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- require:
- service: docker
- service: kubelet
#stop legacy kube-apiserver service
stop_kube-apiserver:
service.dead:
- name: kube-apiserver
- enable: None

View File

@ -0,0 +1,351 @@
{% set daemon_args = "$DAEMON_ARGS" -%}
{% if grains['os_family'] == 'RedHat' -%}
{% set daemon_args = "" -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% set srv_sshproxy_path = "/srv/sshproxy" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% if grains.cloud in ['openstack'] -%}
{% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%}
{% endif -%}
{% endif -%}
{% set advertise_address = "" -%}
{% if grains.advertise_address is defined -%}
{% set advertise_address = "--advertise-address=" + grains.advertise_address -%}
{% endif -%}
{% set proxy_ssh_options = "" -%}
{% if grains.proxy_ssh_user is defined -%}
{% set proxy_ssh_options = "--ssh-user=" + grains.proxy_ssh_user + " --ssh-keyfile=/srv/sshproxy/.sshkeyfile" -%}
{# Append 40 characters onto command to work around #9822. #}
{# If mount list changes, this may also need to change. #}
{% set proxy_ssh_options = proxy_ssh_options + " " -%}
{% endif -%}
{% set address = "--address=127.0.0.1" -%}
{% set bind_address = "" -%}
{% if grains.publicAddressOverride is defined -%}
{% set bind_address = "--bind-address=" + grains.publicAddressOverride -%}
{% endif -%}
{% set storage_backend = "" -%}
{% if pillar['storage_backend'] is defined -%}
{% set storage_backend = "--storage-backend=" + pillar['storage_backend'] -%}
{% endif -%}
{% set etcd_servers = "--etcd-servers=http://127.0.0.1:2379" -%}
{% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
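{# The override above routes the /events resource to a separate etcd instance at 127.0.0.1:4002, so event churn stays out of the main store. #}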
{% set storage_media_type = "" -%}
{% if pillar['storage_media_type'] is defined -%}
{% set storage_media_type = "--storage-media-type=" + pillar['storage_media_type'] -%}
{% endif -%}
{% set liveness_probe_initial_delay = pillar.get('kube_apiserver_liveness_probe_initial_delay', 15) -%}
{% set request_timeout = "" -%}
{% if pillar['kube_apiserver_request_timeout_sec'] is defined -%}
{% set request_timeout = "--request-timeout=" + pillar['kube_apiserver_request_timeout_sec'] + "s" -%}
{% endif -%}
{% set max_requests_inflight = "" -%}
{% set target_ram_mb = "" -%}
{% if pillar['num_nodes'] is defined -%}
# If the cluster is large, increase max-requests-inflight limit in apiserver.
{% if pillar['num_nodes']|int >= 1000 -%}
{% set max_requests_inflight = "--max-requests-inflight=1500 --max-mutating-requests-inflight=500" -%}
{% endif -%}
# Set the amount of memory available to the apiserver based on the number of nodes.
# TODO: Once we start setting proper requests and limits for the apiserver,
# we should reuse the same logic here instead of the current heuristic.
{% set tmp_ram_mb = pillar['num_nodes']|int * 60 %}
{% set target_ram_mb = "--target-ram-mb=" + tmp_ram_mb|string -%}
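{# For example, with num_nodes=500 this renders --target-ram-mb=30000 (500 nodes * 60 MB per node). #}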
{% endif -%}
{% set service_cluster_ip_range = "" -%}
{% if pillar['service_cluster_ip_range'] is defined -%}
{% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
{% set cert_file = "--tls-cert-file=/srv/kubernetes/server.cert" -%}
{% set key_file = "--tls-private-key-file=/srv/kubernetes/server.key" -%}
{% set kubelet_cert_file = "--kubelet-client-certificate=/srv/kubernetes/kubeapiserver.cert" -%}
{% set kubelet_key_file = "--kubelet-client-key=/srv/kubernetes/kubeapiserver.key" -%}
{% set client_ca_file = "" -%}
{% set secure_port = "6443" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
{% set secure_port = "443" -%}
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
{% set min_request_timeout = "" -%}
{% if grains.minRequestTimeout is defined -%}
{% set min_request_timeout = "--min-request-timeout=" + grains.minRequestTimeout -%}
{% endif -%}
{% set token_auth_file = " --token-auth-file=/dev/null" -%}
{% set basic_auth_file = "" -%}
{% set authz_mode = "" -%}
{% set abac_policy_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
{% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
{% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
{% set authz_mode = " --authorization-mode=ABAC" -%}
{% set abac_policy_file = " --authorization-policy-file=/srv/kubernetes/abac-authz-policy.jsonl" -%}
{% endif -%}
{% set webhook_authentication_config = "" -%}
{% set webhook_authn_config_mount = "" -%}
{% set webhook_authn_config_volume = "" -%}
{% if grains.webhook_authentication_config is defined -%}
{% set webhook_authentication_config = " --authentication-token-webhook-config-file=" + grains.webhook_authentication_config -%}
{% set webhook_authn_config_mount = "{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"" + grains.webhook_authentication_config + "\", \"readOnly\": false}," -%}
{% set webhook_authn_config_volume = "{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"" + grains.webhook_authentication_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% set webhook_authorization_config = "" -%}
{% set webhook_config_mount = "" -%}
{% set webhook_config_volume = "" -%}
{% if grains.webhook_authorization_config is defined -%}
{% set webhook_authorization_config = " --authorization-webhook-config-file=" + grains.webhook_authorization_config -%}
{% set webhook_config_mount = "{\"name\": \"webhookconfigmount\",\"mountPath\": \"" + grains.webhook_authorization_config + "\", \"readOnly\": false}," -%}
{% set webhook_config_volume = "{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"" + grains.webhook_authorization_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% set authz_mode = authz_mode + ",Webhook" -%}
{% endif -%}
{% set image_review_config = "" -%}
{% set admission_controller_config_mount = "" -%}
{% set admission_controller_config_volume = "" -%}
{% set image_policy_webhook_config_mount = "" -%}
{% set image_policy_webhook_config_volume = "" -%}
{% if grains.image_review_config is defined -%}
{% set image_review_config = " --admission-control-config-file=" + grains.image_review_config -%}
{% set admission_controller_config_mount = "{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"" + grains.image_review_config + "\", \"readOnly\": false}," -%}
{% set admission_controller_config_volume = "{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"" + grains.image_review_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% set image_policy_webhook_config_mount = "{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false}," -%}
{% set image_policy_webhook_config_volume = "{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% set admission_control = "" -%}
{% if pillar['admission_control'] is defined -%}
{% set admission_control = "--admission-control=" + pillar['admission_control'] -%}
{% endif -%}
{% set runtime_config = "" -%}
{% if grains.runtime_config is defined -%}
{% set runtime_config = "--runtime-config=" + grains.runtime_config -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['api_server_test_log_level'] is defined -%}
{% set log_level = pillar['api_server_test_log_level'] -%}
{% endif -%}
{% set enable_garbage_collector = "" -%}
{% if pillar['enable_garbage_collector'] is defined -%}
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}
{% set etcd_quorum_read = "" %}
{% if pillar['etcd_quorum_read'] is defined -%}
{% set etcd_quorum_read = "--etcd_quorum_read=" + pillar['etcd_quorum_read'] -%}
{% endif -%}
{% set audit_log = "" -%}
{% set audit_policy_config_mount = "" -%}
{% set audit_policy_config_volume = "" -%}
{% set audit_webhook_config_mount = "" -%}
{% set audit_webhook_config_volume = "" -%}
{% if pillar['enable_apiserver_basic_audit'] is defined and pillar['enable_apiserver_basic_audit'] in ['true'] -%}
{% set audit_log = "--audit-log-path=/var/log/kube-apiserver-audit.log --audit-log-maxage=0 --audit-log-maxbackup=0 --audit-log-maxsize=2000000000" -%}
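{# With --audit-log-maxage=0 and --audit-log-maxbackup=0 the apiserver does not prune rotated audit logs itself; rotation is presumably left to the node-level logrotate config (kube-apiserver-audit is listed in logrotate/init.sls). #}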
{% elif pillar['enable_apiserver_advanced_audit'] is defined and pillar['enable_apiserver_advanced_audit'] in ['true'] -%}
{% set audit_log = "--audit-policy-file=/etc/audit_policy.config" -%}
{% set audit_policy_config_mount = "{\"name\": \"auditpolicyconfigmount\",\"mountPath\": \"/etc/audit_policy.config\", \"readOnly\": true}," -%}
{% set audit_policy_config_volume = "{\"name\": \"auditpolicyconfigmount\",\"hostPath\": {\"path\": \"/etc/audit_policy.config\", \"type\": \"FileOrCreate\"}}," -%}
{% if pillar['advanced_audit_backend'] is defined and 'log' in pillar['advanced_audit_backend'] -%}
{% set audit_log = audit_log + " --audit-log-path=/var/log/kube-apiserver-audit.log --audit-log-maxage=0 --audit-log-maxbackup=0 --audit-log-maxsize=2000000000" -%}
{% endif %}
{% if pillar['advanced_audit_backend'] is defined and 'webhook' in pillar['advanced_audit_backend'] -%}
{% set audit_log = audit_log + " --audit-webhook-mode=batch" -%}
{% set audit_webhook_config_mount = "{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"/etc/audit_webhook.config\", \"readOnly\": true}," -%}
{% set audit_webhook_config_volume = "{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/audit_webhook.config\", \"type\": \"FileOrCreate\"}}," -%}
{% endif %}
{% endif -%}
{% set params = address + " " + storage_backend + " " + storage_media_type + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector + " " + etcd_quorum_read + " " + audit_log + " " + request_timeout -%}
{% set params = params + " " + cert_file + " " + key_file + " " + kubelet_cert_file + " " + kubelet_key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config + image_review_config -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
{% if pillar['apiserver_test_args'] is defined -%}
{% set params = params + " " + pillar['apiserver_test_args'] -%}
{% endif -%}
{% set container_env = "" -%}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-apiserver"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-apiserver",
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
"resources": {
"requests": {
"cpu": "250m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 8080,
"path": "/healthz"
},
"initialDelaySeconds": {{liveness_probe_initial_delay}},
"timeoutSeconds": 15
},
"ports":[
{ "name": "https",
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{webhook_config_mount}}
{{webhook_authn_config_mount}}
{{audit_policy_config_mount}}
{{audit_webhook_config_mount}}
{{admission_controller_config_mount}}
{{image_policy_webhook_config_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
"readOnly": false},
{ "name": "auditlogfile",
"mountPath": "/var/log/kube-apiserver-audit.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/srv/pki",
"readOnly": true},
{ "name": "srvsshproxy",
"mountPath": "{{srv_sshproxy_path}}",
"readOnly": false}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{webhook_config_volume}}
{{webhook_authn_config_volume}}
{{audit_policy_config_volume}}
{{audit_webhook_config_volume}}
{{admission_controller_config_volume}}
{{image_policy_webhook_config_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-apiserver.log",
"type": "FileOrCreate"}
},
{ "name": "auditlogfile",
"hostPath": {
"path": "/var/log/kube-apiserver-audit.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/srv/pki"}
},
{ "name": "srvsshproxy",
"hostPath": {
"path": "{{srv_sshproxy_path}}"}
}
]
}}

View File

@ -0,0 +1,6 @@
/usr/local/bin/kubectl:
file.managed:
- source: salt://kube-bins/kubectl
- user: root
- group: root
- mode: 755

View File

@ -0,0 +1,30 @@
# Copy the kube-controller-manager manifest into the manifests folder for the kubelet.
# The ordering of the salt states for the docker, kubelet and master-addon
# services below is very important: it avoids a race between salt restarting
# docker or kubelet and the kubelet starting the master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/kube-controller-manager.manifest:
file.managed:
- source: salt://kube-controller-manager/kube-controller-manager.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- require:
- service: docker
- service: kubelet
/var/log/kube-controller-manager.log:
file.managed:
- user: root
- group: root
- mode: 644
stop-legacy-kube_controller_manager:
service.dead:
- name: kube-controller-manager
- enable: None

View File

@ -0,0 +1,195 @@
{% set cluster_name = "" -%}
{% set cluster_cidr = "" -%}
{% set allocate_node_cidrs = "" -%}
{% set service_cluster_ip_range = "" %}
{% set terminated_pod_gc = "" -%}
{% if pillar['instance_prefix'] is defined -%}
{% set cluster_name = "--cluster-name=" + pillar['instance_prefix'] -%}
{% endif -%}
{% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
{% endif -%}
{% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=true" -%}
{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}
{% if pillar['terminated_pod_gc_threshold'] is defined -%}
{% set terminated_pod_gc = "--terminated-pod-gc-threshold=" + pillar['terminated_pod_gc_threshold'] -%}
{% endif -%}
{% set enable_garbage_collector = "" -%}
{% if pillar['enable_garbage_collector'] is defined -%}
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
{% set pv_recycler_mount = "" -%}
{% set pv_recycler_volume = "" -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%}
{% endif -%}
{% if grains.cloud in ['openstack'] -%}
{% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%}
{% endif -%}
{% endif -%}
{% set root_ca_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['controller_manager_test_log_level'] is defined -%}
{% set log_level = pillar['controller_manager_test_log_level'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
{% set params = params + " " + feature_gates -%}
{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
{% set params = params + " --enable-hostpath-provisioner" %}
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
{% if pillar['controller_manager_test_args'] is defined -%}
{% set params = params + " " + pillar['controller_manager_test_args'] -%}
{% endif -%}
{% set container_env = "" -%}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-controller-manager"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{pv_recycler_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{pv_recycler_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki"}
}
]
}}

View File

@ -0,0 +1,52 @@
/etc/kubernetes/kube-master-addons.sh:
file.managed:
- source: salt://kube-master-addons/kube-master-addons.sh
- user: root
- group: root
- mode: 755
# Intended to restart the kube-master-addons service each time salt is run.
# In practice it does not work (the service is not restarted), but the
# master-addons service always terminates after it does its job, so it is
# (usually) not running and will be started when salt is run.
# This salt state is not removed because there is a risk of introducing a
# regression in 1.0. Please remove it afterwards.
# See also the salt config for kube-addons for how to restart a service
# on demand.
master-docker-image-tags:
file.touch:
- name: /srv/pillar/docker-images.sls
{% if pillar.get('is_systemd') %}
{{ pillar.get('systemd_system_path') }}/kube-master-addons.service:
file.managed:
- source: salt://kube-master-addons/kube-master-addons.service
- user: root
- group: root
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce kube-master-addons
- watch:
- file: master-docker-image-tags
- file: /etc/kubernetes/kube-master-addons.sh
- file: {{ pillar.get('systemd_system_path') }}/kube-master-addons.service
{% else %}
/etc/init.d/kube-master-addons:
file.managed:
- source: salt://kube-master-addons/initd
- user: root
- group: root
- mode: 755
kube-master-addons:
service.running:
- enable: True
- restart: True
- watch:
- file: master-docker-image-tags
- file: /etc/kubernetes/kube-master-addons.sh
{% endif %}

View File

@ -0,0 +1,95 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: kube-master-addons
# Required-Start: $local_fs $network $syslog docker
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Kubernetes Master Addon Object Manager
# Description:
# Enforces installation of Kubernetes Master Addon Objects
### END INIT INFO
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Kubernetes Master Addon Object Manager"
NAME=kube-master-addons
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
KUBE_MASTER_ADDONS_SH=/etc/kubernetes/kube-master-addons.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
${KUBE_MASTER_ADDONS_SH} </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
echo $! > ${PIDFILE}
disown
}
#
# Function that stops the daemon/service
#
do_stop()
{
kill $(cat ${PIDFILE})
rm ${PIDFILE}
return
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) log_end_msg 0 ;;
2) exit 1 ;;
esac
;;
status)
status_of_proc -p $PIDFILE $KUBE_MASTER_ADDONS_SH $NAME
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

View File

@ -0,0 +1,9 @@
[Unit]
Description=Kubernetes-Master Addon Object Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/etc/kubernetes/kube-master-addons.sh
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,90 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
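# Bit values: 1 = kube-apiserver, 2 = kube-scheduler, 4 = kube-controller-manager;
# a value of 7 therefore means all three images have been loaded.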
function load-docker-images() {
let loadedImageFlags=0
while true; do
restart_docker=false
if which docker 1>/dev/null 2>&1; then
timeout 120 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
rc=$?
if [[ $rc == 0 ]]; then
let loadedImageFlags="$loadedImageFlags|1"
elif [[ $rc == 124 ]]; then
restart_docker=true
fi
timeout 120 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
rc=$?
if [[ $rc == 0 ]]; then
let loadedImageFlags="$loadedImageFlags|2"
elif [[ $rc == 124 ]]; then
restart_docker=true
fi
timeout 120 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
rc=$?
if [[ $rc == 0 ]]; then
let loadedImageFlags="$loadedImageFlags|4"
elif [[ $rc == 124 ]]; then
restart_docker=true
fi
fi
# All required docker images have been loaded; exit the while loop.
if [[ $loadedImageFlags == 7 ]]; then break; fi
# Sometimes docker load hangs; restarting the docker daemon resolves the issue.
if [[ "${restart_docker}" == "true" ]]; then
if ! service docker restart; then # Try systemctl if there's no service command.
systemctl restart docker
fi
fi
# sleep for 15 seconds before attempting to load docker images again
sleep 15
done
}
function convert-rkt-image() {
(cd /tmp; ${DOCKER2ACI_BIN} $1)
}
function load-rkt-images() {
convert-rkt-image /srv/salt/kube-bins/kube-apiserver.tar
convert-rkt-image /srv/salt/kube-bins/kube-scheduler.tar
convert-rkt-image /srv/salt/kube-bins/kube-controller-manager.tar
# Currently we can't run docker image tarballs directly,
# so we use 'rkt fetch' to load the docker images into the rkt image store.
# See https://github.com/coreos/rkt/issues/2392.
${RKT_BIN} fetch /tmp/*.aci --insecure-options=image
}
if [[ "${KUBERNETES_CONTAINER_RUNTIME}" == "rkt" ]]; then
load-rkt-images
else
load-docker-images
fi
# Now exit. After kube-push, salt will notice that the service is down and it
# will start it and new docker images will be loaded.

View File

@ -0,0 +1,71 @@
/etc/kubernetes/kube-node-unpacker.sh:
file.managed:
- source: salt://kube-node-unpacker/kube-node-unpacker.sh
- makedirs: True
- user: root
- group: root
- mode: 755
{% if grains.cloud is defined and grains.cloud == 'gce' %}
node-docker-image-tags:
file.touch:
- name: /srv/pillar/docker-images.sls
{% else %}
kube-proxy-tar:
file.managed:
- name: /srv/salt/kube-bins/kube-proxy.tar
- source: salt://kube-bins/kube-proxy.tar
- makedirs: True
- user: root
- group: root
- mode: 644
{% endif %}
{% set is_helium = '0' %}
# Super annoying, the salt version on GCE is old enough that 'salt.cmd.run'
# isn't supported
{% if grains.cloud is defined and grains.cloud == 'aws' %}
# Salt has terrible problems with systemd on AWS too
{% set is_helium = '0' %}
{% endif %}
# Salt Helium doesn't support systemd modules for service running
{% if pillar.get('is_systemd') and is_helium == '0' %}
{{ pillar.get('systemd_system_path') }}/kube-node-unpacker.service:
file.managed:
- source: salt://kube-node-unpacker/kube-node-unpacker.service
- user: root
- group: root
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce kube-node-unpacker
- watch:
{% if grains.cloud is defined and grains.cloud == 'gce' %}
- file: node-docker-image-tags
{% else %}
- file: kube-proxy-tar
{% endif %}
- file: /etc/kubernetes/kube-node-unpacker.sh
- file: {{ pillar.get('systemd_system_path') }}/kube-node-unpacker.service
{% else %}
/etc/init.d/kube-node-unpacker:
file.managed:
- source: salt://kube-node-unpacker/initd
- user: root
- group: root
- mode: 755
kube-node-unpacker:
service.running:
- enable: True
- restart: True
- watch:
{% if grains.cloud is defined and grains.cloud == 'gce' %}
- file: node-docker-image-tags
{% else %}
- file: kube-proxy-tar
{% endif %}
- file: /etc/kubernetes/kube-node-unpacker.sh
{% endif %}

View File

@ -0,0 +1,95 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: kube-node-unpacker
# Required-Start: $local_fs $network $syslog docker
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Kubernetes Node Unpacker
# Description:
# Unpacks docker images on Kubernetes nodes
### END INIT INFO
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Kubernetes Node Unpacker"
NAME=kube-node-unpacker
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
KUBE_MASTER_ADDONS_SH=/etc/kubernetes/kube-node-unpacker.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
${KUBE_MASTER_ADDONS_SH} </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
echo $! > ${PIDFILE}
disown
}
#
# Function that stops the daemon/service
#
do_stop()
{
kill $(cat ${PIDFILE})
rm ${PIDFILE}
return
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) log_end_msg 0 ;;
2) exit 1 ;;
esac
;;
status)
status_of_proc -p $PIDFILE $KUBE_MASTER_ADDONS_SH $NAME
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

View File

@ -0,0 +1,9 @@
[Unit]
Description=Kubernetes Node Unpacker
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/etc/kubernetes/kube-node-unpacker.sh
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,46 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
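# Only the kube-proxy image is tracked here, so a value of 1 means it has loaded.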
let loadedImageFlags=0
while true; do
restart_docker=false
if which docker 1>/dev/null 2>&1; then
timeout 120 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
rc=$?
if [[ "${rc}" == 0 ]]; then
let loadedImageFlags="${loadedImageFlags}|1"
elif [[ "${rc}" == 124 ]]; then
restart_docker=true
fi
fi
# The required docker image has been loaded; exit the while loop.
if [[ "${loadedImageFlags}" == 1 ]]; then break; fi
# Sometimes docker load hangs; restarting the docker daemon resolves the issue.
if [[ "${restart_docker}" == "true" ]]; then service docker restart; fi
# sleep for 15 seconds before attempting to load docker images again
sleep 15
done
# Now exit. After kube-push, salt will notice that the service is down and it
# will start it and new docker images will be loaded.

View File

@ -0,0 +1,12 @@
approvers:
- bowei
- dnardo
- freehan
- nicksardo
- mrhohn
reviewers:
- bowei
- dnardo
- freehan
- nicksardo
- mrhohn

View File

@ -0,0 +1,40 @@
/var/lib/kube-proxy/kubeconfig:
file.managed:
- source: salt://kube-proxy/kubeconfig
- user: root
- group: root
- mode: 400
- makedirs: true
# kube-proxy in a static pod
{% if pillar.get('kube_proxy_daemonset', '').lower() != 'true' %}
/etc/kubernetes/manifests/kube-proxy.manifest:
file.managed:
- source: salt://kube-proxy/kube-proxy.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- context:
# Increasing to 100m to avoid CPU starvation on full nodes.
# Any change here should be accompanied by a proportional change in CPU
# requests of other per-node add-ons (e.g. fluentd).
cpurequest: '100m'
- require:
- service: docker
- service: kubelet
{% endif %}
/var/log/kube-proxy.log:
file.managed:
- user: root
- group: root
- mode: 644
#stop legacy kube-proxy service
stop_kube-proxy:
service.dead:
- name: kube-proxy
- enable: None

View File

@ -0,0 +1,127 @@
# Please keep kube-proxy configuration in-sync with:
# cluster/addons/kube-proxy/kube-proxy-ds.yaml
{% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%}
{% if grains.api_servers is defined -%}
{% set api_servers = "--master=https://" + grains.api_servers -%}
{% else -%}
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
{% endif -%}
{% set test_args = "" -%}
{% if pillar['kubeproxy_test_args'] is defined -%}
{% set test_args=pillar['kubeproxy_test_args'] %}
{% endif -%}
{% set cluster_cidr = "" -%}
{% if pillar['cluster_cidr'] is defined -%}
{% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['kubeproxy_test_log_level'] is defined -%}
{% set log_level = pillar['kubeproxy_test_log_level'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set throttles = "--iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s" -%}
{% set pod_priority = "" -%}
{% if pillar.get('enable_pod_priority', '').lower() == 'true' -%}
{% set pod_priority = "priorityClassName: system-node-critical" -%}
{% endif -%}
# test_args should always go last to overwrite prior configuration
{% set params = log_level + " " + throttles + " " + feature_gates + " " + test_args -%}
{% set container_env = "" -%}
{% set kube_cache_mutation_detector_env_name = "" -%}
{% set kube_cache_mutation_detector_env_value = "" -%}
# kube-proxy podspec
apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
namespace: kube-system
# This annotation ensures that kube-proxy does not get evicted if the node
# supports the critical-pod-annotation-based priority scheme.
# Note that kube-proxy runs as a static pod, so this annotation does NOT have
# any effect on the rescheduler (neither the default scheduler nor the
# rescheduler is involved in scheduling kube-proxy).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
tier: node
component: kube-proxy
spec:
{{pod_priority}}
hostNetwork: true
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
containers:
- name: kube-proxy
image: {{pillar['kube_docker_registry']}}/kube-proxy:{{pillar['kube-proxy_docker_tag']}}
resources:
requests:
cpu: {{ cpurequest }}
command:
- /bin/sh
- -c
- kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" --oom-score-adj=-998 {{params}} 1>>/var/log/kube-proxy.log 2>&1
{{container_env}}
{{kube_cache_mutation_detector_env_name}}
{{kube_cache_mutation_detector_env_value}}
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: etc-ssl-certs
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-ca-certs
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
- mountPath: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
readOnly: false
- mountPath: /run/xtables.lock
name: iptableslock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
volumes:
- hostPath:
path: /usr/share/ca-certificates
name: usr-ca-certs
- hostPath:
path: /etc/ssl/certs
name: etc-ssl-certs
- hostPath:
path: /var/lib/kube-proxy/kubeconfig
type: FileOrCreate
name: kubeconfig
- hostPath:
path: /var/log
name: varlog
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: iptableslock
- name: lib-modules
hostPath:
path: /lib/modules

View File

View File

@ -0,0 +1,8 @@
/etc/kubernetes/manifests/kube-registry-proxy.yaml:
file.managed:
- source: salt://kube-registry-proxy/kube-registry-proxy.yaml
- user: root
- group: root
- mode: 644
- makedirs: True
- dir_mode: 755

View File

@ -0,0 +1,35 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-registry-proxy
namespace: kube-system
labels:
k8s-app: kube-registry
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
template:
metadata:
labels:
k8s-app: kube-registry
kubernetes.io/name: "kube-registry-proxy"
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
containers:
- name: kube-registry-proxy
image: gcr.io/google_containers/kube-registry-proxy:0.4
resources:
limits:
cpu: 100m
memory: 50Mi
env:
- name: REGISTRY_HOST
value: kube-registry.kube-system.svc.cluster.local
- name: REGISTRY_PORT
value: "5000"
ports:
- name: registry
containerPort: 80
hostPort: 5000

View File

@ -0,0 +1,30 @@
# Copy the kube-scheduler manifest into the manifests folder for the kubelet.
# The ordering of the salt states for the docker, kubelet and master-addon
# services below is very important: it avoids a race between salt restarting
# docker or kubelet and the kubelet starting the master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this issue.
/etc/kubernetes/manifests/kube-scheduler.manifest:
file.managed:
- source: salt://kube-scheduler/kube-scheduler.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- require:
- service: docker
- service: kubelet
/var/log/kube-scheduler.log:
file.managed:
- user: root
- group: root
- mode: 644
#stop legacy kube-scheduler service
stop_kube-scheduler:
service.dead:
- name: kube-scheduler
- enable: None

View File

@ -0,0 +1,89 @@
{% set params = "--master=127.0.0.1:8080" -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['scheduler_test_log_level'] is defined -%}
{% set log_level = pillar['scheduler_test_log_level'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}
{% set scheduling_algorithm_provider = "" -%}
{% if grains.scheduling_algorithm_provider is defined -%}
{% set scheduling_algorithm_provider = "--algorithm-provider=" + grains.scheduling_algorithm_provider -%}
{% endif -%}
{% set params = params + " " + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
{% if pillar['scheduler_test_args'] is defined -%}
{% set params = params + " " + pillar['scheduler_test_args'] -%}
{% endif -%}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-scheduler"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-scheduler",
"image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
"resources": {
"requests": {
"cpu": "75m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10251,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{
"name": "logfile",
"mountPath": "/var/log/kube-scheduler.log",
"readOnly": false
},
{
"name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true
}
]
}
],
"volumes":[
{
"name": "srvkube",
"hostPath": {"path": "{{srv_kube_path}}"}
},
{
"name": "logfile",
"hostPath": {"path": "/var/log/kube-scheduler.log", "type": "FileOrCreate"}
}
]
}}

View File

@ -0,0 +1,195 @@
{% set daemon_args = "$DAEMON_ARGS" -%}
{% if grains['os_family'] == 'RedHat' -%}
{% set daemon_args = "" -%}
{% endif -%}
# kubeconfig file
{% set require_kubeconfig = "" %}
{% if grains.kubelet_bootstrap_kubeconfig is defined -%}
{% set bootstrap_kubeconfig = "--bootstrap-kubeconfig=" + grains.kubelet_bootstrap_kubeconfig -%}
{% else -%}
{% set bootstrap_kubeconfig = "" -%}
{% endif -%}
{% if grains.kubelet_kubeconfig is defined -%}
{% set kubeconfig = "--kubeconfig=" + grains.kubelet_kubeconfig -%}
{% else -%}
{% set kubeconfig = "" -%}
{% endif -%}
{% set master_kubelet_args = "" %}
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] -%}
# Unless given a specific directive, disable registration for the kubelet
# running on the master.
{% if kubeconfig != "" -%}
{% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
{% endif -%}
# Disable the debugging handlers (/run and /exec) to prevent arbitrary
# code execution on the master.
# TODO(roberthbailey): Relax this constraint once the master is self-hosted.
{% set debugging_handlers = "--enable-debugging-handlers=false" -%}
{% endif -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set cloud_config = "" -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% set config = "--pod-manifest-path=/etc/kubernetes/manifests" -%}
{% set manifest_url = "" -%}
{% set manifest_url_header = "" -%}
{% if pillar.get('enable_manifest_url', '').lower() == 'true' %}
{% set manifest_url = "--manifest-url=" + pillar['manifest_url'] + " --manifest-url-header=" + pillar['manifest_url_header'] -%}
{% endif -%}
{% set hostname_override = "" -%}
{% if grains.hostname_override is defined -%}
{% set hostname_override = " --hostname-override=" + grains.hostname_override -%}
{% endif -%}
{% set cluster_dns = "" %}
{% set cluster_domain = "" %}
{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
{% set cluster_dns = "--cluster-dns=" + pillar['dns_server'] %}
{% set cluster_domain = "--cluster-domain=" + pillar['dns_domain'] %}
{% endif %}
{% set docker_root = "" -%}
{% if grains.docker_root is defined -%}
{% set docker_root = " --docker-root=" + grains.docker_root -%}
{% endif -%}
{% set kubelet_root = "" -%}
{% if grains.kubelet_root is defined -%}
{% set kubelet_root = " --root-dir=" + grains.kubelet_root -%}
{% endif -%}
{% set non_masquerade_cidr = "" -%}
{% if pillar.get('non_masquerade_cidr','') -%}
{% set non_masquerade_cidr = "--non-masquerade-cidr=" + pillar.non_masquerade_cidr -%}
{% endif -%}
# Set up the cgroup hierarchies.
{% set cgroup_root = "" -%}
{% set system_container = "" -%}
{% set kubelet_container = "" -%}
{% set runtime_container = "" -%}
{% if grains['os_family'] == 'Debian' -%}
{% if pillar.get('is_systemd') %}
{% set cgroup_root = "--cgroup-root=docker" -%}
{% else %}
{% set cgroup_root = "--cgroup-root=/" -%}
{% set system_container = "--system-cgroups=/system" -%}
{% set runtime_container = "--runtime-cgroups=/docker-daemon" -%}
{% set kubelet_container= "--kubelet-cgroups=/kubelet" -%}
{% endif %}
{% endif -%}
{% if grains['oscodename'] in ['vivid','wily'] -%}
{% set cgroup_root = "--cgroup-root=docker" -%}
{% endif -%}
{% set pod_cidr = "" %}
{% if grains['roles'][0] == 'kubernetes-master' %}
{% if grains.get('cbr-cidr') %}
{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
{% elif kubeconfig == "" and pillar.get('network_provider', '').lower() == 'kubenet' %}
# Kubelet standalone mode needs a PodCIDR since there is no controller-manager
{% set pod_cidr = "--pod-cidr=10.76.0.0/16" %}
{% endif -%}
{% endif %}
{% set cpu_cfs_quota = "" %}
{% if pillar['enable_cpu_cfs_quota'] is defined -%}
{% set cpu_cfs_quota = "--cpu-cfs-quota=" + pillar['enable_cpu_cfs_quota'] -%}
{% endif -%}
{% set feature_gates = "" -%}
{% if grains['feature_gates'] is defined -%}
{% set feature_gates = "--feature-gates=" + grains['feature_gates'] -%}
{% endif %}
{% set test_args = "" -%}
{% if pillar['kubelet_test_args'] is defined -%}
{% set test_args=pillar['kubelet_test_args'] %}
{% endif -%}
{% set network_plugin = "" -%}
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
{% set network_plugin = "--network-plugin=opencontrail" %}
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
{% set network_plugin = "--network-plugin=cni --cni-conf-dir=/etc/cni/net.d/ --cni-bin-dir=/home/kubernetes/bin/" %}
{% elif pillar.get('network_policy_provider', '').lower() == 'calico' and grains['roles'][0] != 'kubernetes-master' %}
{% set network_plugin = "--network-plugin=cni --cni-conf-dir=/etc/cni/net.d/ --cni-bin-dir=/home/kubernetes/bin/" %}
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set network_plugin = "--network-plugin=kubenet --cni-bin-dir=/home/kubernetes/bin/" -%}
{% endif -%}
# Don't pass the --hairpin-mode flag by default. This allows the kubelet to pick
# an appropriate value.
{% set hairpin_mode = "" -%}
# The master cannot see Services because it doesn't run kube-proxy, so we don't
# need to make its container bridge promiscuous. We also don't want to set
# the hairpin-veth flag on the master because it increases the chances of
# running into the kernel bug described in #20096.
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% set hairpin_mode = "--hairpin-mode=none" -%}
{% elif pillar['hairpin_mode'] is defined and pillar['hairpin_mode'] in ['promiscuous-bridge', 'hairpin-veth', 'none'] -%}
{% set hairpin_mode = "--hairpin-mode=" + pillar['hairpin_mode'] -%}
{% endif -%}
{% set kubelet_port = "" -%}
{% if pillar['kubelet_port'] is defined -%}
{% set kubelet_port="--port=" + pillar['kubelet_port'] %}
{% endif -%}
{% set log_level = pillar['log_level'] -%}
{% if pillar['kubelet_test_log_level'] is defined -%}
{% set log_level = pillar['kubelet_test_log_level'] -%}
{% endif -%}
{% set enable_custom_metrics = "" -%}
{% if pillar['enable_custom_metrics'] is defined -%}
{% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
{% endif -%}
{% set kube_proxy_ds_label = "" %}
{% if grains['roles'][0] != 'kubernetes-master' and pillar.get('kube_proxy_daemonset', '').lower() == 'true' %}
# Add the kube-proxy daemonset label to the node to avoid the situation where two
# instances of kube-proxy run on a node during a cluster upgrade/downgrade.
{% set kube_proxy_ds_label = "beta.kubernetes.io/kube-proxy-ds-ready=true," %}
{% endif %}
{% set node_labels = kube_proxy_ds_label + pillar['node_labels'] %}
{% if grains['roles'][0] != 'kubernetes-master' and pillar['non_master_node_labels'] is defined -%}
{% set node_labels = pillar['non_master_node_labels'] + "," + node_labels %}
{% endif %}
{% if node_labels != "" %}
{% set node_labels="--node-labels=" + node_labels %}
{% endif %}
{% set node_taints = "" %}
{% if pillar['node_taints'] is defined -%}
{% set node_taints="--register-with-taints=" + pillar['node_taints'] %}
{% endif -%}
{% set eviction_hard = "" %}
{% if pillar['eviction_hard'] is defined -%}
{% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
{% endif -%}
{% set kubelet_auth = "--anonymous-auth=false --authorization-mode=Webhook --client-ca-file=" + pillar.get('ca_cert_bundle_path', '/var/lib/kubelet/ca.crt') %}
{% set pki=" --cert-dir=/var/lib/kubelet/pki" -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{bootstrap_kubeconfig}} {{kubeconfig}} {{require_kubeconfig}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{node_taints}} {{eviction_hard}} {{kubelet_auth}} {{pki}} {{feature_gates}} {{test_args}}"

View File

@ -0,0 +1,106 @@
{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/kubelet' %}
{% else %}
{% set environment_file = '/etc/default/kubelet' %}
{% endif %}
{{ environment_file}}:
file.managed:
- source: salt://kubelet/default
- template: jinja
- user: root
- group: root
- mode: 644
/usr/local/bin/kubelet:
file.managed:
- source: salt://kube-bins/kubelet
- user: root
- group: root
- mode: 755
/var/lib/kubelet/pki:
file.directory:
- mode: 755
- makedirs: True
# By default this file is blank. In that case the kubelet won't be able to
# parse it as JSON and will not be able to publish events to the apiserver.
# You'll see a single error line about this in the kubelet startup log.
/var/lib/kubelet/bootstrap-kubeconfig:
file.managed:
- source: salt://kubelet/bootstrap-kubeconfig
- user: root
- group: root
- mode: 400
- makedirs: true
{% if grains.cloud != 'gce' %}
/var/lib/kubelet/ca.crt:
file.managed:
- source: salt://kubelet/ca.crt
- user: root
- group: root
- mode: 400
- makedirs: true
{% endif %}
{% if pillar.get('is_systemd') %}
{{ pillar.get('systemd_system_path') }}/kubelet.service:
file.managed:
- source: salt://kubelet/kubelet.service
- user: root
- group: root
# The service.running block below doesn't work reliably.
# Instead we run our own script, which e.g. does a systemd daemon-reload,
# but we keep the service block below so it can be used by dependencies.
# TODO: Fix this
fix-service-kubelet:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce kubelet
- watch:
- file: /var/lib/kubelet/pki
- file: /usr/local/bin/kubelet
- file: {{ pillar.get('systemd_system_path') }}/kubelet.service
- file: {{ environment_file }}
- file: /var/lib/kubelet/bootstrap-kubeconfig
{% if grains.cloud != 'gce' %}
- file: /var/lib/kubelet/ca.crt
{% endif %}
{% else %}
/etc/init.d/kubelet:
file.managed:
- source: salt://kubelet/initd
- user: root
- group: root
- mode: 755
{% endif %}
kubelet:
service.running:
- enable: True
- watch:
- file: /usr/local/bin/kubelet
{% if pillar.get('is_systemd') %}
- file: {{ pillar.get('systemd_system_path') }}/kubelet.service
{% else %}
- file: /etc/init.d/kubelet
{% endif %}
{% if grains['os_family'] == 'RedHat' %}
- file: /usr/lib/systemd/system/kubelet.service
{% endif %}
- file: {{ environment_file }}
- file: /var/lib/kubelet/bootstrap-kubeconfig
{% if grains.cloud != 'gce' %}
- file: /var/lib/kubelet/ca.crt
{% endif %}
{% if pillar.get('is_systemd') %}
- provider:
- service: systemd
{%- endif %}

View File

@ -0,0 +1,126 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: kubelet
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: The Kubernetes node container manager
# Description:
# The Kubernetes container manager maintains docker state against a state file.
### END INIT INFO
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes container manager"
NAME=kubelet
DAEMON=/usr/local/bin/kubelet
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Avoid a potential race at boot time when both monit and init.d start
# the same service
PIDS=$(pidof $DAEMON)
for PID in ${PIDS}; do
kill -9 $PID
done
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
start-stop-daemon --start --quiet --background --no-close \
--make-pidfile --pidfile $PIDFILE \
--exec $DAEMON -c $DAEMON_USER --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --background --no-close \
--make-pidfile --pidfile $PIDFILE \
--exec $DAEMON -c $DAEMON_USER -- \
$DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) log_end_msg 0 ;;
2) exit 1 ;;
esac
;;
status)
status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac
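On hosts without systemd, the init script above is driven through the usual SysV entry points; a quick manual check might look like:

/etc/init.d/kubelet start
/etc/init.d/kubelet status
/etc/init.d/kubelet restart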

View File

View File

@ -0,0 +1,14 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/sysconfig/kubelet
ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
Restart=always
RestartSec=2s
StartLimitInterval=0
KillMode=process
[Install]
WantedBy=multi-user.target
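The unit reads DAEMON_ARGS from /etc/sysconfig/kubelet; a minimal sketch of that environment file, with hypothetical kubelet flags, would be:

DAEMON_ARGS="--v=2 --cloud-provider=gce"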

View File

@ -0,0 +1,6 @@
approvers:
- bowei
- nicksardo
reviewers:
- bowei
- nicksardo

View File

@ -0,0 +1,56 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.7
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: gcp-lb-controller
version: v0.9.7
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: gcr.io/google_containers/glbc:0.9.7
livenessProbe:
httpGet:
path: /healthz
port: 8086
scheme: HTTP
initialDelaySeconds: 30
# healthz reaches out to GCE
periodSeconds: 30
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 5
name: l7-lb-controller
volumeMounts:
- mountPath: /etc/gce.conf
name: cloudconfig
readOnly: true
- mountPath: /var/log/glbc.log
name: logfile
readOnly: false
resources:
      # Request is set to accommodate this pod alongside the other
# master components on a single core master.
# TODO: Make resource requirements depend on the size of the cluster
requests:
cpu: 10m
memory: 50Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- '/glbc --verbose=true --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf
type: FileOrCreate
name: cloudconfig
- hostPath:
path: /var/log/glbc.log
type: FileOrCreate
name: logfile

View File

@ -0,0 +1,17 @@
/etc/kubernetes/manifests/glbc.manifest:
file.managed:
- source: salt://l7-gcp/glbc.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
/var/log/glbc.log:
file.managed:
- user: root
- group: root
- mode: 644

View File

@ -0,0 +1,13 @@
/var/log/{{ file }}.log {
rotate 5
copytruncate
missingok
notifempty
compress
maxsize 100M
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}

View File

@ -0,0 +1,2 @@
#!/bin/sh
logrotate /etc/logrotate.conf

View File

@ -0,0 +1,10 @@
/var/lib/docker/containers/*/*-json.log {
rotate 5
copytruncate
missingok
notifempty
compress
maxsize 10M
daily
create 0644 root root
}

View File

@ -0,0 +1,35 @@
logrotate:
pkg:
- installed
{% set logrotate_files = ['kube-scheduler', 'kube-proxy', 'kubelet', 'kube-apiserver', 'kube-apiserver-audit', 'kube-controller-manager', 'kube-addons', 'docker'] %}
{% for file in logrotate_files %}
/etc/logrotate.d/{{ file }}:
file:
- managed
- source: salt://logrotate/conf
- template: jinja
- user: root
- group: root
- mode: 644
- context:
file: {{ file }}
{% endfor %}
/etc/logrotate.d/docker-containers:
file:
- managed
- source: salt://logrotate/docker-containers
- template: jinja
- user: root
- group: root
- mode: 644
/etc/cron.hourly/logrotate:
file:
- managed
- source: salt://logrotate/cron
- template: jinja
- user: root
- group: root
- mode: 755
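Each drop-in rendered by this state is the conf template above with {{ file }} substituted (for example, /etc/logrotate.d/kubelet rotates /var/log/kubelet.log). A dry run can be used to sanity-check a rendered file without rotating anything:

logrotate -d /etc/logrotate.d/kubelet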

View File

@ -0,0 +1,11 @@
ntp:
pkg:
- installed
ntp-service:
service:
- running
- name: ntp
- watch:
- pkg: ntp

View File

@ -0,0 +1,15 @@
opencontrail-networking-master:
cmd.script:
- unless: test -f /var/log/contrail/provision_master.log
- env:
- 'OPENCONTRAIL_TAG': '{{ pillar.get('opencontrail_tag') }}'
- 'OPENCONTRAIL_KUBERNETES_TAG': '{{ pillar.get('opencontrail_kubernetes_tag') }}'
- 'OPENCONTRAIL_PUBLIC_SUBNET': '{{ pillar.get('opencontrail_public_subnet') }}'
- 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
- source: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/provision_master.sh
- source_hash: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/manifests.hash
- cwd: /
- user: root
- group: root
- mode: 755
- shell: /bin/bash

View File

@ -0,0 +1,15 @@
opencontrail-networking-minion:
cmd.script:
- unless: test -f /var/log/contrail/provision_minion.log
- env:
- 'OPENCONTRAIL_TAG': '{{ pillar.get('opencontrail_tag') }}'
- 'OPENCONTRAIL_KUBERNETES_TAG': '{{ pillar.get('opencontrail_kubernetes_tag') }}'
- 'OPENCONTRAIL_PUBLIC_SUBNET': '{{ pillar.get('opencontrail_public_subnet') }}'
- 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
- source: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/provision_minion.sh
- source_hash: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/manifests.hash
- cwd: /
- user: root
- group: root
- mode: 755
- shell: /bin/bash

View File

@ -0,0 +1,15 @@
/etc/kubernetes/manifests/rescheduler.manifest:
file.managed:
- source: salt://rescheduler/rescheduler.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
/var/log/rescheduler.log:
file.managed:
- user: root
- group: root
- mode: 644

View File

@ -0,0 +1,36 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.3.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.3.1
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: gcr.io/google-containers/rescheduler:v0.3.1
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 10m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- '/rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
type: FileOrCreate
name: logfile

View File

@ -0,0 +1,24 @@
/opt/kubernetes/helpers:
file.directory:
- user: root
- group: root
- makedirs: True
- dir_mode: 755
{% if pillar.get('is_systemd') %}
/opt/kubernetes/helpers/services:
file.managed:
- source: salt://salt-helpers/services
- user: root
- group: root
- mode: 755
{% endif %}
{% if grains.get('os_family', '') == 'Debian' -%}
/opt/kubernetes/helpers/pkg:
file.managed:
- source: salt://salt-helpers/pkg-apt
- user: root
- group: root
- mode: 755
{% endif %}

View File

@ -0,0 +1,70 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Helper script that installs a package, wrapping it with a policy that
# means we won't try to start services.
set -o errexit
set -o nounset
set -o pipefail
ACTION=${1}
NAME=${2}
VERSION=${3}
SRC=${4}
if [[ -z "${ACTION}" || -z "${NAME}" || -z "${VERSION}" || -z "${SRC}" ]]; then
echo "Syntax: ${0} <action> <name> <version> <src>"
exit 1
fi
old_policy=""
function install_no_start {
# Query the existing installed version, assuming that an error means package not found
existing=`dpkg-query -W -f='${Version}' ${NAME} 2>/dev/null || echo ""`
if [[ -n "${existing}" ]]; then
if [[ "${existing}" == "${VERSION}" ]]; then
return
fi
echo "Different version of package ${NAME} installed: ${VERSION} vs ${existing}"
fi
if [[ -e "/usr/sbin/policy-rc.d" ]]; then
tmpfile=`mktemp`
mv /usr/sbin/policy-rc.d ${tmpfile}
old_policy=${tmpfile}
fi
trap cleanup EXIT
echo -e '#!/bin/sh\nexit 101' > /usr/sbin/policy-rc.d
chmod 755 /usr/sbin/policy-rc.d
echo "Installing package ${NAME} from ${SRC}"
dpkg --install ${SRC}
}
function cleanup {
rm -f /usr/sbin/policy-rc.d
if [[ -n "${old_policy}" ]]; then
mv ${old_policy} /usr/sbin/policy-rc.d
fi
}
if [[ "${ACTION}" == "install-no-start" ]]; then
install_no_start
else
echo "Unknown action: ${ACTION}"
exit 1
fi
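A sketch of how the wrapper is meant to be invoked, with hypothetical package name, version, and .deb path:

/opt/kubernetes/helpers/pkg install-no-start docker-engine 1.12.6-0~debian-jessie /var/cache/docker-install/docker-engine.deb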

View File

@ -0,0 +1,72 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
ACTION=${1}
SERVICE=${2}
if [[ -z "${ACTION}" || -z "${SERVICE}" ]]; then
echo "Syntax: ${0} <action> <service>"
exit 1
fi
function reload_state() {
systemctl daemon-reload
}
function start_service() {
systemctl start ${SERVICE}
}
function stop_service() {
systemctl stop ${SERVICE}
}
function enable_service() {
systemctl enable ${SERVICE}
}
function disable_service() {
systemctl disable ${SERVICE}
}
function restart_service() {
systemctl restart ${SERVICE}
}
if [[ "${ACTION}" == "up" ]]; then
reload_state
enable_service
start_service
elif [[ "${ACTION}" == "bounce" ]]; then
reload_state
enable_service
restart_service
elif [[ "${ACTION}" == "down" ]]; then
reload_state
disable_service
stop_service
elif [[ "${ACTION}" == "enable" ]]; then
reload_state
enable_service
else
echo "Unknown action: ${ACTION}"
exit 1
fi
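The kubelet state earlier in this commit invokes this helper as "/opt/kubernetes/helpers/services bounce kubelet"; the remaining actions follow the same pattern:

/opt/kubernetes/helpers/services up kubelet
/opt/kubernetes/helpers/services down kubelet
/opt/kubernetes/helpers/services enable kubelet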

View File

@ -0,0 +1,87 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to start docker and then loop until
# it detects a failure. It then exits, and supervisord restarts it,
# which in turn restarts docker.
main() {
if ! healthy 60; then
stop_docker
start_docker
echo "waiting 30s for startup"
sleep 30
healthy 60
fi
while healthy; do
sleep 10
done
echo "Docker failed!"
exit 2
}
# Performs a health check on docker. If a parameter is passed, it is treated
# as the number of seconds to keep trying for a healthy result. If none is
# passed, we make only one attempt.
healthy() {
max_retry_sec="$1"
shift
starttime=$(date +%s)
while ! timeout 60 docker ps > /dev/null; do
if [[ -z "$max_retry_sec" || $(( $(date +%s) - starttime )) -gt "$max_retry_sec" ]]; then
echo "docker ps did not succeed"
return 2
else
echo "waiting 5s before retry"
sleep 5
fi
done
echo "docker is healthy"
return 0
}
stop_docker() {
/etc/init.d/docker stop
  # Make sure docker has gracefully terminated before starting it again
starttime=`date +%s`
while pidof docker > /dev/null; do
currenttime=`date +%s`
((elapsedtime = currenttime - starttime))
# after 60 seconds, forcefully terminate docker process
if test $elapsedtime -gt 60; then
echo "attempting to kill docker process with sigkill signal"
kill -9 `pidof docker` || sleep 10
else
echo "waiting clean shutdown"
sleep 10
fi
done
}
start_docker() {
echo "docker is not running. starting docker"
  # clean up the docker network checkpoint to avoid running into a known
  # docker issue (https://github.com/docker/docker/issues/18283)
rm -rf /var/lib/docker/network
/etc/init.d/docker start
}
main

View File

@ -0,0 +1,6 @@
[program:docker]
command=/usr/sbin/docker-checker.sh
stderr_logfile=/var/log/supervisor/docker-stderr.log
stdout_logfile=/var/log/supervisor/docker-stdout.log
autorestart=true
startretries=1000000

View File

@ -0,0 +1,102 @@
{% if not pillar.get('is_systemd') %}
supervisor:
pkg:
- installed
monit:
pkg:
- purged
/etc/supervisor/conf.d/docker.conf:
file:
- managed
- source: salt://supervisor/docker.conf
- user: root
- group: root
- mode: 644
- makedirs: True
- require_in:
- pkg: supervisor
- require:
- file: /usr/sbin/docker-checker.sh
/usr/sbin/docker-checker.sh:
file:
- managed
- source: salt://supervisor/docker-checker.sh
- user: root
- group: root
- mode: 755
- makedirs: True
/etc/supervisor/conf.d/kubelet.conf:
file:
- managed
- source: salt://supervisor/kubelet.conf
- user: root
- group: root
- mode: 644
- makedirs: True
- require_in:
- pkg: supervisor
- require:
- file: /usr/sbin/kubelet-checker.sh
/usr/sbin/kubelet-checker.sh:
file:
- managed
- source: salt://supervisor/kubelet-checker.sh
- template: jinja
- user: root
- group: root
- mode: 755
- makedirs: True
{% if grains['roles'][0] == 'kubernetes-master' -%}
/etc/supervisor/conf.d/kube-addons.conf:
file:
- managed
- source: salt://supervisor/kube-addons.conf
- user: root
- group: root
- mode: 644
- makedirs: True
- require_in:
- pkg: supervisor
- require:
- file: /usr/sbin/kube-addons-checker.sh
/usr/sbin/kube-addons-checker.sh:
file:
- managed
- source: salt://supervisor/kube-addons-checker.sh
- user: root
- group: root
- mode: 755
- makedirs: True
{% endif %}
/etc/supervisor/supervisor_watcher.sh:
file.managed:
- source: salt://supervisor/supervisor_watcher.sh
- user: root
- group: root
- mode: 755
- makedirs: True
crontab -l | { cat; echo "* * * * * /etc/supervisor/supervisor_watcher.sh 2>&1 | logger"; } | crontab -:
cmd.run:
- unless: crontab -l | grep "* * * * * /etc/supervisor/supervisor_watcher.sh 2>&1 | logger"
supervisor-service:
service:
- running
- name: supervisor
- watch:
- pkg: supervisor
- file: /etc/supervisor/conf.d/*
- require:
- pkg: supervisor
{% endif %}

View File

@ -0,0 +1,34 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to start the kube-addons and then loop until
# it detects a failure. It then exits, and supervisord restarts it,
# which in turn restarts the kube-addons.
/etc/init.d/kube-addons stop
/etc/init.d/kube-addons start
echo "waiting a minute for startup"
sleep 60
while true; do
if ! /etc/init.d/kube-addons status > /dev/null; then
echo "kube-addons failed!"
exit 2
fi
sleep 10
done

View File

@ -0,0 +1,6 @@
[program:kube-addons]
command=/usr/sbin/kube-addons-checker.sh
stderr_logfile=/var/log/supervisor/kube-addons-stderr.log
stdout_logfile=/var/log/supervisor/kube-addons-stdout.log
autorestart=true
startretries=1000000

View File

@ -0,0 +1,37 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to start the kubelet and then loop until
# it detects a failure. It then exits, and supervisord restarts it,
# which in turn restarts the kubelet.
/etc/init.d/kubelet stop
/etc/init.d/kubelet start
echo "waiting a minute for startup"
sleep 60
max_seconds=10
while true; do
if ! curl -m ${max_seconds} -f -s http://127.0.0.1:10255/healthz > /dev/null; then
echo "kubelet failed!"
curl http://127.0.0.1:10255/healthz
exit 2
fi
sleep 10
done

View File

@ -0,0 +1,6 @@
[program:kubelet]
command=/usr/sbin/kubelet-checker.sh
stderr_logfile=/var/log/supervisor/kubelet-stderr.log
stdout_logfile=/var/log/supervisor/kubelet-stdout.log
autorestart=true
startretries=1000000

View File

@ -0,0 +1,34 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is invoked by crond every minute to check whether supervisord is
# up and OOM-protected. If it is down, the script restarts supervisord;
# otherwise it exits after applying oom_score_adj.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
if ! /etc/init.d/supervisor status > /dev/null; then
service supervisor start
sleep 10
fi
# Apply oom_score_adj: -901 to processes
pids=$(cat /var/run/supervisord.pid)
for pid in "${pids}"; do
echo -901 > /proc/$pid/oom_score_adj
done
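To confirm the watcher has applied the OOM protection, the score it writes can be read back (assuming the pidfile matches the running supervisord):

cat /proc/$(cat /var/run/supervisord.pid)/oom_score_adj   # expected: -901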

vendor/k8s.io/kubernetes/cluster/saltbase/salt/top.sls
View File

@ -0,0 +1,79 @@
base:
'*':
- base
- debian-auto-upgrades
- salt-helpers
{% if grains.get('cloud') == 'aws' %}
- ntp
{% endif %}
{% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' %}
- e2e
{% endif %}
'roles:kubernetes-pool':
- match: grain
- docker
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
- cni
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
- cni
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
- cni
{% endif %}
- helpers
- kube-client-tools
- kube-node-unpacker
- kubelet
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
- opencontrail-networking-minion
{% else %}
- kube-proxy
{% endif %}
{% if pillar.get('enable_cluster_registry', '').lower() == 'true' %}
- kube-registry-proxy
{% endif %}
{% if pillar['prepull_e2e_images'] is defined and pillar['prepull_e2e_images'].lower() == 'true' %}
- e2e-image-puller
{% endif %}
- logrotate
- supervisor
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
- calico.node
{% endif %}
'roles:kubernetes-master':
- match: grain
- generate-cert
- etcd
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
- cni
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
- cni
{% endif %}
{% if pillar.get('enable_l7_loadbalancing', '').lower() == 'glbc' %}
- l7-gcp
{% endif %}
- kube-apiserver
- kube-controller-manager
- kube-scheduler
- supervisor
- kube-client-tools
- kube-master-addons
- kube-admission-controls
{% if grains['cloud'] is defined and grains['cloud'] != 'vagrant' %}
- logrotate
{% endif %}
- kube-addons
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'photon-controller', 'openstack', 'azure-legacy'] %}
- docker
- kubelet
{% endif %}
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
- opencontrail-networking-master
{% endif %}
{% if pillar.get('enable_cluster_autoscaler', '').lower() == 'true' %}
- cluster-autoscaler
{% endif %}
{% if pillar.get('enable_rescheduler', '').lower() == 'true' %}
- rescheduler
{% endif %}
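The roles and cloud grains matched above are set by the per-provider provisioning scripts; assuming the standard Salt CLI is available on the master, the targeting can be spot-checked with grain matching:

salt -G 'roles:kubernetes-pool' test.ping
salt-call --local grains.item roles cloud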