#!/bin/bash
# Helper-function library for driving a Direktil Local Server (DLS) instance
# and the QEMU VMs it provisions. Expects a sibling `vars` file defining at
# least: tknfile, DLS_UNLOCK_TOKEN, QEMU_DISK_SIZE, QEMU_BR_IP, QEMU_BR_NAME.

source "$(dirname "$0")/vars"

## Helper funcs

# Print an informational message in green.
pinfo() {
  echo -e "\e[32m$@\e[39m"
}

# Print an error message in red and abort the script.
perror() {
  echo -e "\e[31m$@\e[39m"
  exit 1
}

# Query the DLS REST API: dls <path> [extra curl args...].
# Lazily loads the admin token from $tknfile when DLS_ADM_TOKEN is unset.
dls() {
  local path=$1
  shift
  if [ -z "$DLS_ADM_TOKEN" ]; then
    test -f "$tknfile" && DLS_ADM_TOKEN=$(cat "$tknfile")
  fi
  curl -s \
    -H 'Content-Type: application/json' \
    -H "Authorization: $DLS_ADM_TOKEN" \
    "http://127.0.0.1:7606${path}" "$@"
}

# kubectl wrapper pinned to this context's kubeconfig.
kctl() {
  kubectl --kubeconfig "$ctxdir/kubeconfig" "$@"
}

# Extract the value of a "key: value" YAML line from $ctxdir/<where>/*.yaml.
# NOTE(review): flat grep-based YAML parsing — breaks on nested keys; fine for
# the simple config files this project uses.
extract_var() {
  local where=$1 what=$2
  grep -rh " $what: " "$ctxdir/$where"/*.yaml | awk '{print $2}'
}

## Run funcs

# Abort unless running as root.
check_root() {
  [ "$UID" != 0 ] && perror "This program needs to be run as root. Aborting..."
}

# Enter the project root, export $ctxdir and load the project vars.
prereqs() {
  set -e
  cd "$(dirname "$0")/.."
  ctxdir="$PWD"
  source scripts/vars
}

# These scripts only support a single cluster definition; abort otherwise.
check_conf() {
  local all_clusters
  all_clusters=$(ls "$ctxdir/clusters" | wc -l)
  if [ "$all_clusters" != "1" ]; then
    perror "Those helper scripts are not capable of running several clusters at once, check your configuration. Aborting..."
  fi
}

# Wipe generated secrets. ${ctxdir:?} aborts instead of expanding to "/secrets"
# if ctxdir is unset/empty (prereqs was not run).
fresh_start() {
  rm -rf "${ctxdir:?}/secrets"
}

# Unlock DLS store after checking if online.
unlock_store() {
  if [ -f "$tknfile" ]; then
    DLS_ADM_TOKEN="$(cat "$tknfile")"
  fi
  local store_state
  # 000 = connection failed, 200 = already unlocked, anything else = locked.
  store_state=$(curl -H 'Content-Type: application/json' \
    -H "Authorization: $DLS_ADM_TOKEN" \
    -sw '%{http_code}' localhost:7606/hosts -o /dev/null)
  if [ "$store_state" == "000" ]; then
    perror "Direktil Local Server seems not up, please fix. Aborting."
  elif [ "$store_state" == "200" ]; then
    pinfo "Direktil Local Server store already unlocked"
  else
    pinfo "Unlocking the DLS store ..."
    # The unlock endpoint returns the admin token as a JSON string; strip quotes.
    DLS_ADM_TOKEN=$(dls /public/unlock-store -d "\"${DLS_UNLOCK_TOKEN}\"" | tr -d \")
    pinfo "Admin access token is $DLS_ADM_TOKEN"
    echo "$DLS_ADM_TOKEN" > "$tknfile"
    chmod 444 "$tknfile"
  fi
}

# hostname -> ip map, filled by get_hosts and read by get_parts/destroyvms.
# Must be associative: without declare -A, bash arithmetic-evaluates hostnames
# as indices (hostnames with '-' or '.' are errors, others collapse to 0).
declare -A hosts

# Populate the hosts map from $ctxdir/hosts/<name>.yaml files.
get_hosts() {
  local f h ip
  for f in "$ctxdir"/hosts/*.yaml; do
    [ -e "$f" ] || continue   # no-match guard (glob left literal)
    h=$(basename "$f" .yaml)
    ip=$(grep ip: "$f" | awk '{print $2}')
    hosts[$h]="$ip"
  done
}

# Download kernel/initrd for every host and create its sparse disk image.
get_parts() {
  local host part partfile diskfile
  for host in "${!hosts[@]}"; do
    mkdir -p "$ctxdir/data/$host"
    for part in kernel initrd-v2; do
      partfile=$ctxdir/data/$host/$part
      test -f "$partfile" || dls "/hosts/$host/$part" -o "$partfile"
    done
    diskfile=$ctxdir/data/$host/disk
    test -f "$diskfile" || truncate -s "${QEMU_DISK_SIZE:-30G}" "$diskfile"
  done
}

# Kill any previously started VM processes (pid files under $ctxdir/data).
# BUGFIX: the original re-assigned host=$1 inside the loop, clobbering the
# loop variable so the known hosts were never actually iterated.
destroyvms() {
  local host
  for host in "${!hosts[@]}"; do
    if test -f "$ctxdir/data/$host/pid"; then
      pinfo "Detected a pid file, killing process in case VM was already started"
      kill "$(cat "$ctxdir/data/$host/pid")" && sleep 1
    fi
  done
}

# Render scripts/.template.kubeconfig into $ctxdir/kubeconfig, unless one
# already exists.
create_kubeconfig() {
  if test -f "$ctxdir/kubeconfig"; then
    pinfo "kubeconfig file detected in config dir, won't overwrite... remove it for an update."
    return
  fi
  local adm_token ca_cert vip vip_api_port
  adm_token=$(dls /clusters/base/tokens/admin)
  ca_cert=$(dls /clusters/base/CAs/cluster/certificate | base64 -w0)
  vip=$(extract_var clusters public_vip)
  vip_api_port=$(extract_var clusters api_port)
  pinfo "Writing new kubeconfig conf in $ctxdir directory, you may want to move it to ~/.kube/ directory for usability"
  # BUGFIX: use '|' as the sed delimiter — base64 output (ca_cert) contains
  # '/' characters, which broke the original 's/.../.../' substitutions.
  sed -e "s|__VIP_IP__|$vip|" \
    -e "s|__VIP_API_PORT__|$vip_api_port|" \
    -e "s|__CA_CERT__|$ca_cert|" \
    -e "s|__ADM_TOKEN__|$adm_token|" \
    scripts/.template.kubeconfig > "$ctxdir/kubeconfig"
  chmod 444 "$ctxdir/kubeconfig"
}

# Best-effort removal of the NAT/forwarding iptables rules set up for the
# VM bridge; errors are deliberately ignored (rules may not exist).
clean() {
  set +e
  sudo iptables -t nat -D POSTROUTING -j MASQUERADE -s "$QEMU_BR_IP" ! -o "$QEMU_BR_NAME" &>/dev/null
  sudo iptables -D FORWARD -o "$QEMU_BR_NAME" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT &>/dev/null
  sudo iptables -D FORWARD -j ACCEPT -i "$QEMU_BR_NAME" &>/dev/null
}