Compare commits: 973db6fba8 ... 83defa740f
2 Commits:
- 83defa740f
- 8675219622
.gitignore (vendored): 6 lines changed
@@ -1 +1,7 @@
 .*sw[po]
+cache
+data
+dist
+kubeconfig
+config.yaml
+.dls_adm_token
@@ -27,7 +27,10 @@ vars:
   netmask: 24
   gateway: 172.17.1.8
   dns:
-  - 172.17.1.1
+  #- 172.17.1.1
+  - 208.67.220.220
 
+  dls_base_url: http://172.17.1.8:7606
+
   etcd:
     image: quay.io/coreos/etcd
@@ -1,4 +1,4 @@
-#!include hosts/common
+#!include hosts.incl/common
 
 labels:
   "node-role.novit.io/master": "true"
@@ -1,4 +1,4 @@
-#!include common
+#!include hosts.incl/common
 
 labels:
   "node-role.novit.io/node": "true"
@@ -1,6 +0,0 @@
-ip: 10.xx.xx.xx
-ips:
-- 127.0.0.1
-- ::1
-cluster: master
-group: master
hosts/m1.yaml (new file): 4 lines
@@ -0,0 +1,4 @@
+#!include hosts.incl/master
+
+ip: 172.17.1.100
+cluster: base
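The host files are assembled from the include fragments touched above: hosts/m1.yaml pulls in hosts.incl/master and only adds the host-specific fields, and get_hosts in scripts/.common (added later in this compare) picks up every hosts/*.yaml and reads its ip: field. As a purely hypothetical sketch of the same pattern (the file name, the hosts.incl/node fragment name and the IP are illustrative assumptions, not part of this change), a worker host could be added like this:

# Hypothetical sketch only: a worker host reusing the node include fragment.
cat > hosts/n1.yaml <<'EOF'
#!include hosts.incl/node

ip: 172.17.1.101
cluster: base
EOF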
kubeconfig (new executable file): 19 lines
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    server: https://172.17.1.100:6443
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNNakNDQVpPZ0F3SUJBZ0lVZFM5bU5PenhscGJBdXVWcWYyQjIydy96MzdVd0NnWUlLb1pJemowRUF3UXcKTXpFUk1BOEdBMVVFQ2hNSWJtOTJhWFF1YVc4eEhqQWNCZ05WQkFNVEZVUnBjbVZyZEdsc0lFeHZZMkZzSUZObApjblpsY2pBZUZ3MHlNekExTVRneE1UQXpNREJhRncweU9EQTFNVFl4TVRBek1EQmFNRE14RVRBUEJnTlZCQW9UCkNHNXZkbWwwTG1sdk1SNHdIQVlEVlFRREV4VkVhWEpsYTNScGJDQk1iMk5oYkNCVFpYSjJaWEl3Z1pzd0VBWUgKS29aSXpqMENBUVlGSzRFRUFDTURnWVlBQkFBOXdac3h0U2l2RUhUQW50aUx4WFFkUmdxR05sempRMGYwelowQwpyc3hmSGF1K0xrQ1JZNDFoN29ZY21XdVArK09FUlNOL3BGUjhNL3lCZ1dRR0JBbDd2Z0JhZEtpZ01xcHMyalhXCmxUTmt1dnJJbzZhMitESWpjTWNITUhyUUpNUFpzSm8vWGRwOWIyaW5yM2U4dnBiUjFEeEwwRDJMcy9WaWFZL2EKWEE1akR0ZHp2YU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0VHTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRApWUjBPQkJZRUZIdUpKVjVwN0EzdEhWWDkrb2dkQ1IxZkVFbFBNQW9HQ0NxR1NNNDlCQU1FQTRHTUFEQ0JpQUpDCkFQZHZlOU9nRjFmTjE5T2VjTXc0WEhSVFBGcmMvVHp6SFA0WmlRQVRKdUlLMS9hVEozK1k4QUd5b3ByNElXbmsKUWZZeWtVVmpFREJIVTFLN1JWU1h3YjdJQWtJQnd5K2FORVd6N0hxZE5QQTRKZXV2M3ZKUGFXdU9vRElERWxoMwora3o0ZjFpUkR5QVFKbXZta2dxRzA1M214RmRZU2VIU0NpQ2hhN242RG5kUTgveDYwODA9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+  name: localconfig
+contexts:
+- context:
+    cluster: localconfig
+    namespace: kube-system
+    user: admin
+  name: localconfig
+current-context: localconfig
+kind: Config
+preferences: {}
+users:
+- name: admin
+  user:
+    token: DU3ITMWLQN2TVGVDTWIURJ42S4
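This committed kubeconfig points kubectl straight at the master m1 (https://172.17.1.100:6443) with an embedded CA bundle and admin token; note that kubeconfig is also listed in the new .gitignore entries above, so regenerated copies stay out of version control. A quick sanity check, assuming kubectl is installed locally:

# Smoke test against the cluster this kubeconfig describes.
kubectl --kubeconfig ./kubeconfig get nodes
kubectl --kubeconfig ./kubeconfig -n kube-system get pods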
scripts/.common (new file): 128 lines
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+## Vars
+
+tknfile=".dls_adm_token"
+
+## Helper funcs
+
+
+pinfo() {
+  echo -e "\e[32m$@\e[39m"
+}
+
+perror() {
+  echo -e "\e[31m$@\e[39m"
+  exit 1
+}
+
+dls() {
+  path=$1
+  shift
+  if [ -z "$DLS_ADM_TOKEN" ]; then
+    test -f $tknfile && DLS_ADM_TOKEN=$(cat $tknfile)
+  fi
+  curl -s -H 'Content-Type: application/json' -H 'Authorization: '$DLS_ADM_TOKEN http://127.0.0.1:7606${path} "$@"
+}
+
+kctl() {
+  kubectl --kubeconfig $ctxdir/kubeconfig "$@"
+}
+
+extract_var() {
+  where=$1
+  what=$2
+  grep -rh " $what: " $ctxdir/$where/*.yaml|awk '{print $2}'
+}
+
+## Run funcs
+
+check_root() {
+  [ $UID != 0 ] && perror "This program needs to be run as root. Aborting..."
+}
+
+prereqs() {
+  set -ex
+  cd "$(dirname $0)/.."
+  ctxdir="$PWD"
+}
+
+check_conf() {
+  all_clusters=$(ls $ctxdir/clusters|wc -l)
+  if [ "$all_clusters" != "1" ]; then
+    perror "These helper scripts are not capable of running several clusters at once, check your configuration. Aborting..."
+  fi
+}
+
+fresh_start() {
+  rm -rf $ctxdir/secrets
+}
+
+unlock_store() {
+  # Unlock DLS store after checking if online
+  #
+  if [ -f $tknfile ]; then
+    DLS_ADM_TOKEN="$(cat $tknfile)"
+  fi
+  store_state=$(curl -H 'Content-Type: application/json' -H "Authorization: $DLS_ADM_TOKEN" -sw %{http_code} localhost:7606/hosts -o /dev/null)
+  if [ "$store_state" == "000" ] ; then
+    perror "Direktil Local Server seems not up, please fix. Aborting."
+  elif [ "$store_state" == "200" ] ; then
+    pinfo "Direktil Local Server store already unlocked"
+  else
+    pinfo "Unlocking the DLS store ..."
+    DLS_ADM_TOKEN=$(dls /public/unlock-store -d "\"${DLS_UNLOCK_TOKEN}\""|tr -d \")
+    pinfo "Admin access token is $DLS_ADM_TOKEN"
+    echo $DLS_ADM_TOKEN > $tknfile
+    chmod 444 $tknfile
+  fi
+}
+
+
+get_hosts() {
+  hosts_files=$(basename $ctxdir/hosts/*.yaml|sed 's/.yaml//')
+  for h in ${hosts_files}; do
+    ip=$(grep ip: $ctxdir/hosts/${h}.yaml|awk '{print $2}')
+    hosts[$h]="$ip"
+  done
+}
+
+get_parts() {
+  for host in ${!hosts[*]}; do
+    mkdir -p $ctxdir/data/$host
+    for part in kernel initrd-v2
+    do
+      partfile=$ctxdir/data/$host/$part
+      test -f $partfile || dls /hosts/$host/$part -o $partfile
+    done
+    diskfile=$ctxdir/data/$host/disk
+    test -f $diskfile || truncate -s ${QEMU_DISK_SIZE:-30G} $diskfile
+  done
+}
+
+
+create_kubeconfig() {
+  if test -f $ctxdir/kubeconfig; then
+    pinfo "kubeconfig file detected in config dir, won't overwrite... remove it for an update."
+    return
+  fi
+  adm_token=$(dls /clusters/base/tokens/admin)
+  ca_cert=$(dls /clusters/base/CAs/cluster/certificate|base64 -w0)
+  vip=$(extract_var clusters public_vip)
+  vip_api_port=$(extract_var clusters api_port)
+  pinfo "Writing new kubeconfig in the $ctxdir directory, you may want to move it to ~/.kube/ for usability"
+  sed -e "s|__VIP_IP__|$vip|" \
+      -e "s|__VIP_API_PORT__|$vip_api_port|" \
+      -e "s|__CA_CERT__|$ca_cert|" \
+      -e "s|__ADM_TOKEN__|$adm_token|" \
+      scripts/.template.kubeconfig > $ctxdir/kubeconfig
+  chmod 444 $ctxdir/kubeconfig
+}
+
+clean() {
+  set +e
+  sudo iptables -t nat -D POSTROUTING -j MASQUERADE -s $QEMU_BR_IP/$QEMU_BR_MASK \! -o $QEMU_BR_NAME &>/dev/null
+  sudo iptables -D FORWARD -o $QEMU_BR_NAME -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT &>/dev/null
+  sudo iptables -D FORWARD -j ACCEPT -i $QEMU_BR_NAME &>/dev/null
+}
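scripts/.common is meant to be sourced by the numbered scripts rather than executed: it expects $ctxdir (normally set by prereqs) and, for the DLS calls, either $DLS_ADM_TOKEN in the environment or the cached .dls_adm_token file. A rough interactive sketch of the helpers, assuming the DLS container is already listening on 127.0.0.1:7606 and you are in the repository root:

# Sketch only: exercising the helpers by hand.
source scripts/.common
ctxdir="$PWD"                 # normally set by prereqs() when sourced from scripts/*
dls /hosts                    # authenticated GET against the local Direktil server
extract_var clusters gateway  # prints 172.17.1.8 with the vars shown above
kctl get nodes                # kubectl wrapper bound to $ctxdir/kubeconfig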
scripts/.template.kubeconfig (new file): 19 lines
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    server: https://__VIP_IP__:__VIP_API_PORT__
+    certificate-authority-data: __CA_CERT__
+  name: localconfig
+contexts:
+- context:
+    cluster: localconfig
+    namespace: kube-system
+    user: admin
+  name: localconfig
+current-context: localconfig
+kind: Config
+preferences: {}
+users:
+- name: admin
+  user:
+    token: __ADM_TOKEN__
scripts/0.start_dls.sh (new executable file): 49 lines
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+D2C_CTR_NAME=dir2config
+DLS_CTR_NAME=dls
+# Admin token to unlock the DLS store
+DLS_UNLOCK_TOKEN=changeme
+#
+#
+D2C_IMG=novit.tech/direktil/local-server:b6fa941
+DLS_IMG=novit.tech/direktil/local-server:b6fa941
+
+prereqs_dls() {
+  command -v docker 1>/dev/null || perror "Docker is needed, please install it and run again."
+  systemctl is-active docker &>/dev/null || systemctl start docker
+  docker pull $DLS_IMG
+}
+
+dir2config() {
+  pinfo "Generating config.yaml from Direktil configuration"
+  docker run --rm --name $D2C_CTR_NAME \
+    -v .:/var/lib/direktil -w /var/lib/direktil \
+    --entrypoint=/bin/dkl-dir2config \
+    $D2C_IMG
+}
+
+start_store() {
+  if docker ps|grep " $DLS_CTR_NAME$" ; then
+    pinfo "Container $DLS_CTR_NAME seems already running"
+    return
+  fi
+  docker run --rm --name $DLS_CTR_NAME -p 7606:7606 \
+    -e http_proxy=$http_proxy \
+    -e https_proxy=$https_proxy \
+    -e HTTP_PROXY=$HTTP_PROXY \
+    -e HTTPS_PROXY=$HTTPS_PROXY \
+    -v .:/var/lib/direktil \
+    $DLS_IMG &
+    # -auto-unlock 'N0v!T'
+  sleep 2
+}
+
+source $(dirname $0)/.common
+check_root
+prereqs_dls
+prereqs
+dir2config
+start_store
+unlock_store
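0.start_dls.sh wires the first stage together: it generates config.yaml through the dkl-dir2config entrypoint, starts the Direktil Local Server container on port 7606, and unlocks its store with DLS_UNLOCK_TOKEN (hard-coded to changeme above, so it should be edited before a real run). A typical first run from the repository root might look like this:

# Run as root; prereqs() in scripts/.common changes to the repository root itself.
sudo ./scripts/0.start_dls.sh
# The admin token returned by the unlock call is cached in .dls_adm_token
# (ignored by the new .gitignore) for the follow-up scripts.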
scripts/1.qemu.sh (new executable file): 76 lines
@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# This collection of scripts aims to install a NOVIT cluster easily, with the help of QEMU
+#
+#
+# QEMU local bridge name. If you specify a custom name, you may have to configure the qemu bridge helper to allow it
+QEMU_BR_NAME=virbr0
+#
+# QEMU VM default disk size
+QEMU_DISK_SIZE=30G
+# Allocated CPUs to QEMU VMs
+QEMU_VM_CPU=4
+# Allocated Memory to QEMU VMs
+QEMU_VM_MEM=8096
+################
+
+
+## QEMU functions
+#
+prereqs_qemu() {
+  for com in qemu-system-x86_64 truncate docker iptables ; do
+    command -v $com 1>/dev/null || perror "Command $com not found, please install it. Aborting..."
+  done
+}
+setup_network_qemu() {
+  if ! ip li show $QEMU_BR_NAME ; then
+    ip li add name $QEMU_BR_NAME type bridge
+    ip li set $QEMU_BR_NAME up
+  fi
+  QEMU_BR_IP=$(extract_var clusters gateway)
+  QEMU_BR_MASK=$(extract_var clusters netmask)
+  if [ $(echo $QEMU_BR_IP | wc -w) -gt 1 ]; then
+    perror "More than one cluster is configured, not compatible with our quick-start setup, exiting"
+  fi
+  pinfo "Using detected gateway IP $QEMU_BR_IP for bridge $QEMU_BR_NAME"
+  if ! ip a show dev $QEMU_BR_NAME | grep $QEMU_BR_IP ; then
+    ip a add $QEMU_BR_IP/$QEMU_BR_MASK dev $QEMU_BR_NAME
+    sudo iptables -t nat -I POSTROUTING -j MASQUERADE -s $QEMU_BR_IP/$QEMU_BR_MASK \! -o $QEMU_BR_NAME
+    sudo iptables -I FORWARD -o $QEMU_BR_NAME -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+    sudo iptables -I FORWARD -j ACCEPT -i $QEMU_BR_NAME
+  fi
+}
+run_qemu() {
+  id=1
+  for host in ${!hosts[*]}; do
+    ip route show |grep "${hosts[$host]} dev $QEMU_BR_NAME" ||\
+      ip route add ${hosts[$host]} dev $QEMU_BR_NAME
+    if test -f $ctxdir/data/$host/pid ; then
+      pinfo "Detected a pid file, killing process in case VM was already started"
+      kill $(cat $ctxdir/data/$host/pid) && sleep 1
+    fi
+    pinfo "Starting host $host with ip ${hosts[$host]}"
+    qemu-system-x86_64 -enable-kvm -smp $QEMU_VM_CPU -m $QEMU_VM_MEM \
+      -nic bridge,br=$QEMU_BR_NAME,mac=42:42:42:42:42:0${id} \
+      -kernel $ctxdir/data/$host/kernel -initrd $ctxdir/data/$host/initrd-v2 -vga qxl \
+      -drive format=raw,file=$ctxdir/data/$host/disk &
+    echo $! >$ctxdir/data/$host/pid
+    ((++id))
+  done
+  pinfo "$(ls $ctxdir/data/*/pid|wc -w) host(s) have been started"
+}
+# # # # # # # #
+
+source $(dirname $0)/.common
+check_root
+prereqs
+check_conf
+#fresh_start
+trap clean SIGINT SIGTERM SIGKILL
+declare -A hosts
+setup_network_qemu
+get_hosts
+get_parts
+run_qemu
+#clean
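1.qemu.sh builds on the state left by the previous script: it reads the gateway and netmask from the cluster YAML, creates the virbr0 bridge and NAT rules, downloads each host's kernel and initrd-v2 through dls, and boots one KVM guest per hosts/*.yaml file. A typical invocation, assuming 0.start_dls.sh has already run:

# Run as root; VM PIDs land in data/<host>/pid, sparse disks in data/<host>/disk.
sudo ./scripts/1.qemu.sh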
scripts/2.first_start_k8s.sh (new executable file): 88 lines
@@ -0,0 +1,88 @@
+#!/bin/bash
+#
+# Starting control plane
+
+
+prereqs_control_plane() {
+  for com in kubectl ; do
+    command -v $com 1>/dev/null || perror "Command $com is not installed, aborting..."
+  done
+}
+prereqs_addons() {
+  for com in curl kubectl ; do
+    command -v $com 1>/dev/null || perror "Command $com is not installed, aborting..."
+  done
+  cluster=$(basename $ctxdir/clusters/*.yaml|sed 's/.yaml//')
+}
+
+checkup() {
+  for host in ${!hosts[*]}; do
+    tries=3
+    while :
+    do
+      pinfo "Checking availability of node $host..."
+      ssh root@${hosts[$host]} <<< true &>/dev/null
+      if [ $? == 0 ]; then
+        pinfo "VM $host is up!"
+        break
+      else
+        ((tries--))
+        if [ $tries -lt 1 ]; then
+          pinfo "Timeout waiting for node detection, please investigate why node $host is not up by now"
+          break
+        fi
+        sleep 30
+      fi
+    done
+  done
+}
+
+start_control_plane() {
+  for host in ${!hosts[*]}; do
+    ssh root@${hosts[$host]} << EOF
+if ls /etc/kubernetes/manifests.static/* &>/dev/null ; then
+  mv /etc/kubernetes/manifests.static/* /var/lib/kubelet/manifests/
+fi
+EOF
+  done
+}
+
+install_addons() {
+  body='{"Kind":"cluster","Name":"'$cluster'","Assets":["addons"]}'
+  download_id=$(dls /authorize-download -d "$body"|tr -d \")
+  dls /public/downloads/${download_id}/addons |\
+    kctl apply -f -
+}
+
+approve_kubelet_certificates() {
+  tries=5
+  nodes_num=$(kctl get node -oname|wc -l)
+  while [ "$nodes_num" != "${#hosts[*]}" ] ; do
+    pinfo "Waiting for certificate requests to be created by Kubelet when it's ready... ($tries/5)"
+    sleep 60s
+    csrs="$(kctl get csr|awk '/Pending/ {print $1}')"
+    if [ "$csrs" != "" ]; then
+      kctl certificate approve $csrs
+    fi
+    nodes_num=$(kctl get node -oname|wc -l)
+    ((tries--))
+    if [ "$tries" -lt 1 ]; then
+      pinfo "Timeout waiting for kubelet certificates creation, please investigate why all nodes are not up by now"
+      break
+    fi
+  done
+  pinfo "All kubelets ($nodes_num) are up, enjoy!"
+}
+
+source $(dirname $0)/.common
+prereqs
+prereqs_control_plane
+prereqs_addons
+check_conf
+unlock_store
+declare -A hosts && get_hosts
+checkup
+start_control_plane
+create_kubeconfig
+install_addons
+approve_kubelet_certificates # clients and serving certs
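Taken together, the three numbered scripts are meant to run in order once the Direktil configuration is in place, roughly as follows (privileges as coded above: the first two check for root, the last one only needs SSH and kubectl access):

sudo ./scripts/0.start_dls.sh    # generate config.yaml, start DLS, unlock its store
sudo ./scripts/1.qemu.sh         # bridge/NAT setup, fetch boot assets, boot the VMs
./scripts/2.first_start_k8s.sh   # start the control plane, write kubeconfig, install addons, approve CSRs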
@@ -40,7 +40,7 @@ spec:
         - name: ETCD_INITIAL_CLUSTER
           value: {{ range $i, $host := hosts_by_group "master" }}{{ if gt $i 0 }},{{end}}{{$host.name}}=https://{{$host.ip}}:2380{{end}}
         - name: ETCD_INITIAL_CLUSTER_STATE
-          value: {{.etcd.cluster_state}}
+          value: {{ .vars.etcd.cluster_state }}
         - name: ETCD_INITIAL_CLUSTER_TOKEN
           value: '{{ token "etcd-initial-cluster" }}'
         - name: ETCDCTL_ENDPOINTS
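The only functional change in this hunk is the template lookup path for the etcd cluster state, {{.etcd.cluster_state}} becoming {{ .vars.etcd.cluster_state }}, consistent with the vars: block edited earlier in this compare. For reference, assuming m1 (172.17.1.100) ends up as the only host in the master group via its hosts.incl/master include, the unchanged hosts_by_group range above would render ETCD_INITIAL_CLUSTER to roughly m1=https://172.17.1.100:2380, one comma-separated entry per master host.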