# config/configs/node.yaml — direktil node configuration (Go template rendering to YAML)
# Last modified: 2023-05-15 16:40:08 +02:00
# (347 lines, 8.4 KiB, YAML)

# Root account: empty password hash set here; access is through the
# authorized SSH keys below (ssh_keys plus optional extra_ssh_keys).
# NOTE(review): presumably the empty hash disables password login — confirm
# against the direktil docs.
root_user:
  password_hash: ""
  authorized_keys:
{{- range .vars.ssh_keys }}
  - "{{ . }}"
{{- end }}
{{- if .vars.extra_ssh_keys }}
{{- range .vars.extra_ssh_keys }}
  - "{{ . }}"
{{- end }}
{{- end }}
# Layers to apply, ordered from the TOP of this list to the bottom.
layers:
- kubernetes
- init
- modules
- system
{{ if .vars.modules -}}
# Optional modules list; the key is only emitted when at least one is set.
modules:
{{- range .vars.modules }}
- {{ . }}
{{- end }}
{{- end }}
# Dedicated data volumes. Devices come from /dev/mapper when encrypt_disks
# is set (dm-crypt mappings), from /dev/storage otherwise.
mounts:
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/varlog
  path: /var/log
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/kubelet
  path: /var/lib/kubelet
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/containerd
  path: /var/lib/containerd
{{ if .vars.is_master }}
# etcd data volume, master nodes only.
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/etcd
  path: /var/lib/etcd
{{ end }}
# Files written onto the node's filesystem.
- path: /etc/machine-id
  content: |
    {{ machine_id }}
files:
# OpenRC global configuration (legacy cgroup mode, single-user shell on
# failure, logging enabled).
- path: /etc/rc.conf
  content: |
    rc_shell=/sbin/sulogin
    rc_logger="YES"
    #rc_log_path="/var/log/rc.log"
    unicode="YES"
    rc_tty_number=12
    rc_cgroup_mode="legacy"
    rc_cgroup_memory_use_hierarchy="YES"
    rc_controller_cgroups="YES"
# Node hostname, taken from the host definition.
- path: /etc/hostname
  content: "{{.host.name}}\n"
# /etc/hosts: the name "kubernetes" resolves to the public VIP when one is
# configured, and to the node itself otherwise; extra_hosts entries are
# appended verbatim.
- path: /etc/hosts
  content: |
    127.0.0.1 localhost {{.host.name}}{{ if not .vars.public_vip }} kubernetes{{end}}
    ::1 localhost {{.host.name}}{{ if not .vars.public_vip }} kubernetes{{end}}
    {{ if .vars.public_vip }}
    {{ .vars.public_vip }} kubernetes
    {{ end }}
    {{ if .vars.extra_hosts }}
    {{ range .vars.extra_hosts }}
    {{ . }}
    {{ end -}}
    {{ end }}
# DNS resolvers, one nameserver line per configured server.
- path: /etc/resolv.conf
  content: |
    {{- range .vars.dns }}
    nameserver {{ . }}
    {{- end }}
# Kernel tuning for a Kubernetes node (file handles, inotify, PID space,
# IP forwarding, ARP cache thresholds).
- path: /etc/sysctl.conf
  content: |
    fs.file-max = 20971520
    fs.inotify.max_user_watches = 1048576
    kernel.pid_max = 1048576
    net.ipv4.ip_forward = 1
    vm.max_map_count = 262144
    net.ipv4.neigh.default.gc_thresh1 = 16384
    net.ipv4.neigh.default.gc_thresh2 = 28672
    net.ipv4.neigh.default.gc_thresh3 = 32768
# -------------------------------------------------------------------------
# SSH host keys installed under /etc/ssh.
# NOTE(review): ssh_host_keys / tls_dir / ca_dir are template helpers defined
# elsewhere; they presumably emit `files` entries at this level — confirm
# against the direktil template-function definitions.
{{ ssh_host_keys "/etc/ssh" }}
# ------------------------------------------------------------------------
{{ if .vars.is_master }}
# certificates for etcd servers
{{ tls_dir "etcd-server" }}
{{ tls_dir "etcd-peer" }}
# certificates for etcd clients
{{ tls_dir "etcd-client" }}
# cluster certificates
{{ ca_dir "cluster" }}
{{ ca_dir "service-accounts" }}
{{ tls_dir "apiserver" }}
{{ tls_dir "kubelet-client" }}
{{ tls_dir "proxy-client" }}
{{ end }}
# client certificate material for the cluster CA (all nodes)
{{ tls_dir "cluster-client" }}
{{ if .vars.is_master -}}
# Static token auth file for the API server: kubelet bootstrap and admin
# tokens (master nodes only).
- path: /etc/kubernetes/token-auth.csv
  mode: 0600
  content: |
    {{ token "bootstrap" }},kubelet-bootstrap,10001,"system:bootstrappers"
    {{ token "admin" }},admin-token,10002,"system:masters"
{{- end }}
# ------------------------------------------------------------------------
# chrony (NTP): use the configured ntp_servers, falling back to the Gentoo
# public pool when none are set.
- path: /etc/chrony/chrony.conf
  mode: 0644
  content: |
    {{ if .vars.ntp_servers -}}
    {{ range .vars.ntp_servers -}}
    server {{ . }} iburst
    {{ end -}}
    {{ else -}}
    server 0.gentoo.pool.ntp.org iburst
    server 1.gentoo.pool.ntp.org iburst
    server 2.gentoo.pool.ntp.org iburst
    server 3.gentoo.pool.ntp.org iburst
    {{- end }}
    driftfile /var/lib/chrony/drift
    makestep 1.0 3
    rtcsync
# ------------------------------------------------------------------------
# Service: expose k8s-pv-* devices (LVM and dm-crypt) as symlinks under
# /mnt so they can back local persistent volumes; re-scans every 10s.
# Fix: the script is interpreted by /bin/sh but used the bash-only
# substitution ${dev/dev/mnt}; replaced with POSIX ${dev#/dev/}.
- path: /etc/direktil/services/k8s-local-volumes
  mode: 0755
  content: |
    #! /bin/sh
    # ---
    # restart: 3
    while true
    do
      for dev in /dev/storage/k8s-pv-*
      do
        [ -e "$dev" ] || continue
        # POSIX equivalent of bash's ${dev/dev/mnt}:
        # /dev/storage/k8s-pv-X -> /mnt/storage/k8s-pv-X
        tgt="/mnt/${dev#/dev/}"
        [ -e "$tgt" ] || {
          mkdir -p "$(dirname "$tgt")"
          ln -s "$dev" "$tgt"
        }
      done
      for dev in /dev/mapper/k8s-pv-*
      do
        [ -e "$dev" ] || continue
        tgt="/mnt/storage/mapper__$(basename "$dev")"
        [ -e "$tgt" ] || {
          mkdir -p "$(dirname "$tgt")"
          ln -s "$dev" "$tgt"
        }
      done
      sleep 10
    done
# ------------------------------------------------------------------------
# Service: containerd, the container runtime; declares "k8s-runtime" for
# dependents (kubelet waits on it). Raises ulimits and exports proxy
# variables when .vars.proxy is set.
- path: /etc/direktil/services/containerd
  mode: 0755
  content: |
    #! /bin/bash
    # ---
    # restart: 3
    # provides:
    # - k8s-runtime
    set -ex
    ulimit -n 1048576
    ulimit -u unlimited
    ulimit -c unlimited
    {{ if .vars.proxy -}}
    export HTTP_PROXY={{.vars.proxy}}
    export HTTPS_PROXY="$HTTP_PROXY"
    export NO_PROXY="192.168.0.0/16,172.16.0.0/12,10.0.0.0/8"
    {{- end }}
    exec /usr/bin/containerd \
      --log-level info
# -------------------------------------------------------------------------
# Service: kubelet. Waits for the containerd socket, then execs kubelet.
# Fix: --hostname-override was passed twice — the conditional
# .vars.hostname_override value and then, unconditionally, .host.name.
# Repeated kubelet string flags are last-wins, so the override never took
# effect; it is now an if/else choosing exactly one value.
- path: /etc/direktil/services/kubelet
  mode: 0755
  content: |
    #! /bin/sh
    # ---
    # restart: 3
    # needs:
    # - k8s-runtime
    set -ex
    ctr_sock="/run/containerd/containerd.sock"
    echo "waiting for $ctr_sock"
    while ! [ -e $ctr_sock ]; do sleep 1; done
    #ulimit -n 1048576
    mkdir -p /var/lib/kubelet/manifests
    exec /usr/bin/kubelet \
      --config=/etc/kubernetes/kubelet.yaml \
    {{- if .vars.hostname_override }}
      --hostname-override={{.vars.hostname_override}} \
    {{- else }}
      --hostname-override={{.host.name}} \
    {{- end }}
    {{- range $k, $v := .labels }}
      --node-labels={{ $k }}={{$v}} \
    {{- end }}
      --container-runtime-endpoint=unix://$ctr_sock \
      --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
      --kubeconfig=/var/lib/kubelet/kubeconfig \
      --node-ip={{.host.ip}}
# -------------------------------------------------------------------------
# KubeletConfiguration (v1beta1) written for the kubelet's --config flag.
{{ $podPidsLimit := 4096 -}}
- path: /etc/kubernetes/kubelet.yaml
  mode: 0600
  content: |
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    staticPodPath: /var/lib/kubelet/manifests
    makeIPTablesUtilChains: {{ eq .vars.kube_proxy "proxy" }}
    clusterDomain: {{.cluster.domain}}
    clusterDNS:
    - {{.cluster.dns_svc_ip }}
    podCIDR: {{.cluster.subnets.pods}}
    address: 0.0.0.0
    authentication:
      x509:
        clientCAFile: /etc/tls/cluster-client/ca.crt
      anonymous:
        enabled: false
    maxPods: 220
    serializeImagePulls: false
    featureGates: {}
    serverTLSBootstrap: true
    rotateCertificates: true
    podPidsLimit: {{ $podPidsLimit }}
    containerLogMaxFiles: 2
    containerLogMaxSize: 16Mi
    # cgroups configuration
    cgroupsPerQOS: true
    cgroupDriver: cgroupfs
    systemReservedCgroup: openrc
    systemReserved:
      cpu: "{{ .vars.system_reserved.cpu }}"
      memory: "{{ .vars.system_reserved.memory }}"
    kubeReservedCgroup: podruntime
    kubeReserved:
      cpu: "{{ .vars.kube_reserved.cpu }}"
      memory: "{{ .vars.kube_reserved.memory }}"
    #evictionHard:
    # memory.available: 100Mi
# Local haproxy front-end for the API servers: clients (see the kubeconfigs
# below) connect to https://[::1]:6444 and are balanced across the hosts of
# the "master" group.
- path: /etc/kubernetes/haproxy-api.cfg
  content: |
    frontend k8s-api
      bind 127.0.0.1:6444
      bind [::1]:6444
      mode tcp
      default_backend k8s-api
    backend k8s-api
      mode tcp
      option tcp-check
      balance random
      default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    {{- $apiPort := .vars.control_plane.api_port -}}
    {{- range $i, $host := hosts_by_group "master" }}
      server {{$host.name}}_0 {{$host.ip}}:{{ $apiPort }} check
    {{- end }}
# Kubeconfig used by the kubelet for TLS bootstrap, pointing at the local
# haproxy front-end and authenticating with the bootstrap token.
- path: /etc/kubernetes/bootstrap.kubeconfig
  mode: 0600
  content: |
    apiVersion: v1
    kind: Config
    preferences: {}
    current-context: local
    clusters:
    - cluster:
        certificate-authority: /etc/tls/cluster-client/ca.crt
        server: https://[::1]:6444
      name: local
    contexts:
    - context:
        cluster: local
        user: kubelet-bootstrap
      name: local
    users:
    - name: kubelet-bootstrap
      user:
        token: {{ token "bootstrap" }}
# Kubeconfig for control-plane components, same local endpoint but using
# the admin token.
- path: /etc/kubernetes/control-plane/kubeconfig
  mode: 0600
  content: |
    apiVersion: v1
    kind: Config
    preferences: {}
    current-context: local
    clusters:
    - cluster:
        certificate-authority: /etc/tls/cluster-client/ca.crt
        server: https://[::1]:6444
      name: local
    contexts:
    - context:
        cluster: local
        user: control-plane
      name: local
    users:
    - name: control-plane
      user:
        token: {{ token "admin" }}
# NOTE(review): template helper defined elsewhere; presumably emits `files`
# entries for the static pod manifests under the given directory — confirm
# against the direktil template-function definitions.
{{ static_pods_files "/etc/kubernetes/manifests.static" }}