vendor files
36 vendor/k8s.io/kubernetes/cluster/vagrant/OWNERS generated vendored Normal file
@@ -0,0 +1,36 @@
approvers:
- derekwaynecarr
reviewers:
- ArtfulCoder
- thockin
- lavalamp
- smarterclayton
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- erictune
- dchen1107
- zmerlynn
- justinsb
- roberthbailey
- eparis
- jlowdermilk
- piosz
- jsafrane
- jbeda
- madhusudancs
- jayunit100
- cjcullen
- david-mcmahon
- mfojtik
- pweil-
- dcbw
- ivan4th
- filbranden
- dshulyak
- k82cn
- caseydavenport
- johscheuer
122 vendor/k8s.io/kubernetes/cluster/vagrant/config-default.sh generated vendored Executable file
@@ -0,0 +1,122 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Contains configuration values for interacting with the Vagrant cluster

# Number of nodes in the cluster
NUM_NODES=${NUM_NODES-"1"}
export NUM_NODES

# The IP of the master
export MASTER_IP=${MASTER_IP-"10.245.1.2"}
export KUBE_MASTER_IP=${MASTER_IP}

export INSTANCE_PREFIX="kubernetes"
export MASTER_NAME="${INSTANCE_PREFIX}-master"

# Should the master serve as a node
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}

# Map out the IPs, names and container subnets of each node
export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."}
NODE_CONTAINER_SUBNET_BASE="10.246"
MASTER_CONTAINER_NETMASK="255.255.255.0"
MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
for ((i=0; i < NUM_NODES; i++)) do
  NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))"
  NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))"
  NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
  NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
  NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
  VAGRANT_NODE_NAMES[$i]="node-$((i+1))"
done

CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.246.0.0/16}"

SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16  # formerly PORTAL_NET

# Since this isn't exposed on the network, default to a simple user/passwd
MASTER_USER="${MASTER_USER:-vagrant}"
MASTER_PASSWD="${MASTER_PASSWD:-vagrant}"

# Admission Controllers to invoke prior to persisting objects in cluster
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,PVCProtection,ResourceQuota

# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Cluster monitoring to setup as part of the cluster bring up:
#   none     - No cluster monitoring setup
#   influxdb - Heapster, InfluxDB, and Grafana
#   google   - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"

# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries, or globally configuring selinux options
# TODO Enable selinux when Fedora 21 repositories get an updated docker package
#   see https://bugzilla.redhat.com/show_bug.cgi?id=1216151
#EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} -b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8"
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} --insecure-registry 10.0.0.0/8 -s overlay"

# Flag to tell the kubelet to enable CFS quota support
ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.247.0.10"
DNS_DOMAIN="cluster.local"

# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"

# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

# Optional: Enable setting flags for kube-apiserver to turn on behavior in active-dev
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"

# Determine extra certificate names for master
octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
MASTER_EXTRA_SANS="IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"

NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, kubenet, etc
if [ "${NETWORK_PROVIDER}" == "kubenet" ]; then
  CLUSTER_IP_RANGE="${CONTAINER_SUBNET}"
fi

# If enabled kube-controller-manager will be started with the --enable-hostpath-provisioner flag
ENABLE_HOSTPATH_PROVISIONER="${ENABLE_HOSTPATH_PROVISIONER:-true}"

# OpenContrail networking plugin specific settings
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}

# Default fallback NETWORK_IF_NAME, will be used in case when no 'VAGRANT-BEGIN' comments were defined in network-script
export DEFAULT_NETWORK_IF_NAME="eth0"
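Usage sketch: every value above follows the `${VAR:-default}` / `${VAR-default}` pattern, so it can be overridden from the caller's environment before cluster/kube-up.sh runs. The session below is an assumption for illustration, not part of the vendored file:

# Hypothetical override session: a 3-node Vagrant cluster with monitoring disabled.
export KUBERNETES_PROVIDER=vagrant          # select the Vagrant provider scripts
export NUM_NODES=3                          # overrides NUM_NODES=${NUM_NODES-"1"} above
export KUBE_ENABLE_CLUSTER_MONITORING=none  # read into ENABLE_CLUSTER_MONITORING above
cluster/kube-up.sh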
29 vendor/k8s.io/kubernetes/cluster/vagrant/config-test.sh generated vendored Normal file
@@ -0,0 +1,29 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Contains configuration values for interacting with the Vagrant cluster in test mode
# Set NUM_NODES to minimum required for testing.
NUM_NODES=2

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/config-default.sh"

# Do not register the master kubelet during testing
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}

# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
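Usage sketch: util.sh (below) sources `${KUBE_CONFIG_FILE-"config-default.sh"}`, so this test profile is picked up by exporting KUBE_CONFIG_FILE; whether a given test harness sets it automatically is an assumption, not stated in this diff:

# Bring the cluster up with the test profile (2 nodes, master kubelet not registered).
export KUBERNETES_PROVIDER=vagrant
export KUBE_CONFIG_FILE=config-test.sh   # read by cluster/vagrant/util.sh when sourcing the config
cluster/kube-up.sh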
105 vendor/k8s.io/kubernetes/cluster/vagrant/pod-ip-test.sh generated vendored Executable file
@@ -0,0 +1,105 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

echoOK() {
  TC='\e['
  RegB="${TC}0m"
  if [ "$1" -eq "0" ]; then
    Green="${TC}32m"
    echo -e "[${Green}OK${RegB}]"
  else
    Red="${TC}31m"
    echo -e "[${Red}FAIL${RegB}]"
    echo "Check log file."
    exit 1
  fi
}

usage() {
  echo "Usage options: [--logfile <path to file>]"
}

logfile=/dev/null
while [[ $# > 0 ]]; do
  key="$1"
  shift
  case $key in
    -l|--logfile)
      logfile="$1"
      if [ "$logfile" == "" ]; then
        usage
        exit 1
      fi
      shift
      ;;
    *)
      # unknown option
      usage
      exit 1
      ;;
  esac
done

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
cd "${KUBE_ROOT}"

echo All verbose output will be redirected to $logfile, use --logfile option to change.

printf "Start the cluster with 2 nodes .. "
export NUM_NODES=2
export KUBERNETES_PROVIDER=vagrant

(cluster/kube-up.sh >>"$logfile" 2>&1) || true
echoOK $?

printf "Check if node-1 can reach kubernetes master .. "
vagrant ssh node-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
echoOK $?
printf "Check if node-2 can reach kubernetes master .. "
vagrant ssh node-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
echoOK $?

printf "Pull an image that runs a web server on node-1 .. "
vagrant ssh node-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
echoOK $?
printf "Pull an image that runs a web server on node-2 .. "
vagrant ssh node-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
echoOK $?

printf "Run the server on node-1 .. "
vagrant ssh node-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
echoOK $?
printf "Run the server on node-2 .. "
vagrant ssh node-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
echoOK $?

printf "Run ping from node-1 to docker bridges and to the containers on both nodes .. "
vagrant ssh node-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
echoOK $?
printf "Same pinch from node-2 .. "
vagrant ssh node-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
echoOK $?

printf "tcp check, curl to both the running webservers from node-1 .. "
vagrant ssh node-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
echoOK $?
printf "tcp check, curl to both the running webservers from node-2 .. "
vagrant ssh node-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
echoOK $?

printf "All good, destroy the cluster .. "
vagrant destroy -f >>"$logfile" 2>&1
echoOK $?
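Usage sketch: pod-ip-test.sh only understands the -l|--logfile option shown in usage() above; anything else prints the usage text and exits. An example invocation from the repository root (the log path is illustrative):

# Keep the verbose output instead of discarding it to /dev/null (the default).
cluster/vagrant/pod-ip-test.sh --logfile /tmp/pod-ip-test.log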
122 vendor/k8s.io/kubernetes/cluster/vagrant/provision-master.sh generated vendored Executable file
@@ -0,0 +1,122 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MASTER_NAME}
# Set the variable to empty value explicitly
if_to_edit=""

if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then
  # Disable network interface being managed by Network Manager (needed for Fedora 21+)
  NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
  if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
  for if_conf in ${if_to_edit}; do
    grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf}
    sed -i 's/#^NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf}
  done;
  systemctl restart network
fi

# needed for vsphere support
# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
# set the NETWORK_IF_NAME to have a default value in such case
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
if [[ -z "$NETWORK_IF_NAME" ]]; then
  NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
fi

# Setup hosts file to support ping by hostname to each node in the cluster from apiserver
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
  node=${NODE_NAMES[$i]}
  ip=${NODE_IPS[$i]}
  if [ ! "$(cat /etc/hosts | grep $node)" ]; then
    echo "Adding $node to hosts file"
    echo "$ip $node" >> /etc/hosts
  fi
done
echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master.
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts

enable-accounting
prepare-package-manager

# Configure the master network
if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then
  provision-network-master
fi

write-salt-config kubernetes-master

# Generate and distribute a shared secret (bearer token) to
# apiserver and kubelet so that kubelet can authenticate to
# apiserver to send events.
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
if [[ ! -f "${known_tokens_file}" ]]; then

  mkdir -p /srv/salt-overlay/salt/kube-apiserver
  known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
  (umask u=rw,go= ;
   echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file;
   echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file;
   echo "$KUBE_BEARER_TOKEN,admin,admin" >> $known_tokens_file)

  mkdir -p /srv/salt-overlay/salt/kubelet
  kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
  (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)

  create-salt-kubelet-auth
  create-salt-kubeproxy-auth
  # Generate tokens for other "service accounts". Append to known_tokens.
  #
  # NB: If this list ever changes, this script actually has to
  # change to detect the existence of this file, kill any deleted
  # old tokens and add any new tokens (to handle the upgrade case).
  service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
  for account in "${service_accounts[@]}"; do
    token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
    echo "${token},${account},${account}" >> "${known_tokens_file}"
  done
fi


readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
  mkdir -p /srv/salt-overlay/salt/kube-apiserver
  (umask 077;
    echo "${MASTER_PASSWD},${MASTER_USER},admin" > "${BASIC_AUTH_FILE}")
fi

# Enable Fedora Cockpit on host to support Kubernetes administration
# Access it by going to <master-ip>:9090 and login as vagrant/vagrant
if ! which /usr/libexec/cockpit-ws &>/dev/null; then

  pushd /etc/yum.repos.d
    curl -OL https://copr.fedorainfracloud.org/coprs/g/cockpit/cockpit-preview/repo/fedora-23/msuchy-cockpit-preview-fedora-23.repo
    dnf install -y cockpit cockpit-kubernetes docker socat ethtool
  popd

  systemctl enable cockpit.socket
  systemctl start cockpit.socket
fi

install-salt

run-salt
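For reference, the echo lines above produce known_tokens.csv rows of the form token,user,uid; an illustrative excerpt with placeholder values (real tokens are random base64 strings generated by the dd/base64 pipeline):

# /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv (placeholder values)
<KUBELET_TOKEN>,kubelet,kubelet
<KUBE_PROXY_TOKEN>,kube_proxy,kube_proxy
<KUBE_BEARER_TOKEN>,admin,admin
<random-token>,system:scheduler,system:scheduler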
91 vendor/k8s.io/kubernetes/cluster/vagrant/provision-network-master.sh generated vendored Normal file
@@ -0,0 +1,91 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# provision-network-master configures flannel on the master
function provision-network-master {

  echo "Provisioning network on master"

  FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"

  # Install etcd for flannel data
  if ! which etcd >/dev/null 2>&1; then

    dnf install -y etcd

    # Modify etcd configuration for flannel data
    cat <<EOF >/etc/etcd/etcd.conf
ETCD_NAME=flannel
ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd"
ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380"
ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}"
EOF

    # fix the etcd boot failure issue
    sed -i '/^Restart/a RestartSec=10' /usr/lib/systemd/system/etcd.service
    systemctl daemon-reload

    # Enable and start etcd
    systemctl enable etcd
    systemctl start etcd

  fi

  # Install flannel for overlay
  if ! which flanneld >/dev/null 2>&1; then

    dnf install -y flannel

    cat <<EOF >/etc/flannel-config.json
{
  "Network": "${CONTAINER_SUBNET}",
  "SubnetLen": 24,
  "Backend": {
    "Type": "udp",
    "Port": 8285
  }
}
EOF

    # Import default configuration into etcd for master setup
    etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json

    # Configure local daemon to speak to master
    NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
    if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
    NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
    # needed for vsphere support
    # handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
    # set the NETWORK_IF_NAME to have a default value in such case
    if [[ -z "$NETWORK_IF_NAME" ]]; then
      NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
    fi
    cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq"
EOF

    # Start flannel
    systemctl enable flanneld
    systemctl start flanneld
  fi

  echo "Network configuration verified"
}
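Verification sketch: the flannel network config imported above can be read back from the dedicated etcd instance with the same v2-style etcdctl flag used by the `set` call; the subnet-lease file path is an assumption about flanneld defaults, not something this script writes:

# Run on the master after provisioning (assumed commands, not part of the vendored script).
etcdctl -C http://${MASTER_IP}:4379 get /coreos.com/network/config
cat /run/flannel/subnet.env   # default flanneld subnet file; path assumed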
51 vendor/k8s.io/kubernetes/cluster/vagrant/provision-network-node.sh generated vendored Normal file
@@ -0,0 +1,51 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# provision-network-node configures flannel on the node
function provision-network-node {

  echo "Provisioning network on node"

  FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"

  # Install flannel for overlay
  if ! which flanneld >/dev/null 2>&1; then

    dnf install -y flannel

    # Configure local daemon to speak to master
    NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
    if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
    NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
    # needed for vsphere support
    # handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
    # set the NETWORK_IF_NAME to have a default value in such case
    if [[ -z "$NETWORK_IF_NAME" ]]; then
      NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
    fi
    cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq"
EOF

    # Start flannel
    systemctl enable flanneld
    systemctl start flanneld
  fi

  echo "Network configuration verified"
}
88 vendor/k8s.io/kubernetes/cluster/vagrant/provision-node.sh generated vendored Executable file
@@ -0,0 +1,88 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${NODE_NAME}
if_to_edit=""

if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then
  # Disable network interface being managed by Network Manager (needed for Fedora 21+)
  NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
  if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
  for if_conf in ${if_to_edit}; do
    grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf}
    sed -i 's/#^NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf}
  done;
  systemctl restart network
fi

# needed for vsphere support
# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
# set the NETWORK_IF_NAME to have a default value in such case
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
if [[ -z "$NETWORK_IF_NAME" ]]; then
  NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
fi

# Setup hosts file to support ping by hostname to master
if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
  echo "Adding $MASTER_NAME to hosts file"
  echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi
echo "$NODE_IP $NODE_NAME" >> /etc/hosts

# Setup hosts file to support ping by hostname to each node in the cluster
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
  node=${NODE_NAMES[$i]}
  ip=${NODE_IPS[$i]}
  if [ ! "$(cat /etc/hosts | grep $node)" ]; then
    echo "Adding $node to hosts file"
    echo "$ip $node" >> /etc/hosts
  fi
done

enable-accounting
prepare-package-manager

# Configure network
if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then
  provision-network-node
fi

write-salt-config kubernetes-pool

# Generate kubelet and kube-proxy auth file(kubeconfig) if there is not an existing one
known_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
if [[ ! -f "${known_kubeconfig_file}" ]]; then
  create-salt-kubelet-auth
  create-salt-kubeproxy-auth
else
  # stop kubelet, let salt start it later
  systemctl stop kubelet
fi

install-salt
add-volume-support

run-salt

dnf install -y socat ethtool
dnf update -y docker
222 vendor/k8s.io/kubernetes/cluster/vagrant/provision-utils.sh generated vendored Executable file
@@ -0,0 +1,222 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function enable-accounting() {
  mkdir -p /etc/systemd/system.conf.d/
  cat <<EOF >/etc/systemd/system.conf.d/kubernetes-accounting.conf
[Manager]
DefaultCPUAccounting=yes
DefaultMemoryAccounting=yes
EOF
  systemctl daemon-reload
}

function prepare-package-manager() {
  echo "Prepare package manager"

  # Useful if a mirror is broken or slow
  if [ -z "$CUSTOM_FEDORA_REPOSITORY_URL" ]; then
    echo "fastestmirror=True" >> /etc/dnf/dnf.conf
  else
    # remove trailing slash from URL if it's present
    CUSTOM_FEDORA_REPOSITORY_URL="${CUSTOM_FEDORA_REPOSITORY_URL%/}"
    sed -i -e "/^metalink=/d" /etc/yum.repos.d/*.repo
    sed -i -e "s@^#baseurl=http://download.fedoraproject.org/pub/fedora@baseurl=$CUSTOM_FEDORA_REPOSITORY_URL@" /etc/yum.repos.d/*.repo
  fi
}


function add-volume-support() {
  echo "Adding nfs volume support"

  # we need nfs-utils to support volumes
  dnf install -y nfs-utils
}

function write-salt-config() {
  local role="$1"

  # Update salt configuration
  mkdir -p /etc/salt/minion.d

  mkdir -p /srv/salt-overlay/pillar
  cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
enable_hostpath_provisioner: '$(echo "$ENABLE_HOSTPATH_PROVISIONER" | sed -e "s/'/''/g")'
EOF

  if [ -n "${EVICTION_HARD:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
  fi

  cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: warning
log_level_logfile: warning
EOF

  cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  network_mode: openvswitch
  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig
  cloud: vagrant
  roles:
    - $role
  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
  master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
  keep_host_etcd: true
  kube_user: '$(echo "$KUBE_USER" | sed -e "s/'/''/g")'
EOF
}

function release_not_found() {
  echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
  echo "are running from a clone of the git repo, please run 'make quick-release'." >&2
  echo "Note that this requires having Docker installed. If you are running " >&2
  echo "from a release tarball, something is wrong. Look at " >&2
  echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2
  exit 1
}

function install-salt() {
  server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$server_binary_tar" ]]; then
    server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$server_binary_tar" ]]; then
    release_not_found
  fi

  salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
  if [[ ! -f "$salt_tar" ]]; then
    salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
  fi
  if [[ ! -f "$salt_tar" ]]; then
    release_not_found
  fi

  echo "Running release install script"
  rm -rf /kube-install
  mkdir -p /kube-install
  pushd /kube-install
    tar xzf "$salt_tar"
    cp "$server_binary_tar" .
    ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
  popd

  if ! which salt-call >/dev/null 2>&1; then
    # Install salt from official repositories.
    # Need to enable testing-repos to get version of salt with fix for dnf-core-plugins
    dnf config-manager --set-enabled updates-testing
    dnf install -y salt-minion

    # Fedora >= 23 includes salt packages but the bootstrap is
    # creating configuration for a (non-existent) salt repo anyway.
    # Remove the invalid repo to prevent dnf from warning about it on
    # every update. Assume this problem is specific to Fedora 23 and
    # will fixed by the time another version of Fedora lands.
    local fedora_version=$(grep 'VERSION_ID' /etc/os-release | sed 's+VERSION_ID=++')
    if [[ "${fedora_version}" = '23' ]]; then
      local repo_file='/etc/yum.repos.d/saltstack-salt-fedora-23.repo'
      if [[ -f "${repo_file}" ]]; then
        rm "${repo_file}"
      fi
    fi

  fi
}

function run-salt() {
  echo "  Now waiting for the Salt provisioning process to complete on this machine."
  echo "  This can take some time based on your network, disk, and cpu speed."
  salt-call --local state.highstate
}

function create-salt-kubelet-auth() {
  local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet"
  mkdir -p "${kubelet_kubeconfig_folder}"
  (umask 077;
  cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: "https://${MASTER_IP}"
    insecure-skip-tls-verify: true
  name: local
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
users:
- name: kubelet
  user:
    token: ${KUBELET_TOKEN}
EOF
)
}

function create-salt-kubeproxy-auth() {
  kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy"
  mkdir -p "${kube_proxy_kubeconfig_folder}"
  (umask 077;
  cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
    insecure-skip-tls-verify: true
  name: local
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
EOF
)
}
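The sed -e "s/'/''/g" wrapper applied to every pillar and grain value above doubles embedded single quotes, which is the escape rule for single-quoted YAML scalars, so arbitrary values stay valid inside '...'. A small standalone sketch of the idiom:

# Doubling single quotes keeps an arbitrary value safe inside '...' in the generated SLS/YAML.
VALUE="it's a test"
echo "escaped: '$(echo "$VALUE" | sed -e "s/'/''/g")'"
# prints: escaped: 'it''s a test'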
389 vendor/k8s.io/kubernetes/cluster/vagrant/util.sh generated vendored Executable file
@@ -0,0 +1,389 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

function detect-master () {
  KUBE_MASTER_IP=$MASTER_IP
  echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
}

# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes {
  echo "Nodes already detected" 1>&2
  KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}

# Verify prereqs on host machine. Also sets exports USING_KUBE_SCRIPTS=true so
# that our Vagrantfile doesn't error out.
function verify-prereqs {
  for x in vagrant; do
    if ! which "$x" >/dev/null; then
      echo "Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n')
  local providers=(
    # Format is:
    #   provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re
    # either provider_ctl_executable or vagrant_provider_plugin_re can
    # be blank (i.e., '') if none is needed by Vagrant (see, e.g.,
    # virtualbox entry)
    '' vmware_fusion vagrant-vmware-fusion
    '' vmware_workstation vagrant-vmware-workstation
    prlctl parallels vagrant-parallels
    VBoxManage virtualbox ''
    virsh libvirt vagrant-libvirt
    '' vsphere vagrant-vsphere
  )
  local provider_found=''
  local provider_bin
  local provider_name
  local provider_plugin_re

  while [ "${#providers[@]}" -gt 0 ]; do
    provider_bin=${providers[0]}
    provider_name=${providers[1]}
    provider_plugin_re=${providers[2]}
    providers=("${providers[@]:3}")

    # If the provider is explicitly set, look only for that provider
    if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \
      && [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then
      continue
    fi

    if ([ -z "${provider_bin}" ] \
        || which "${provider_bin}" >/dev/null 2>&1) \
      && ([ -z "${provider_plugin_re}" ] \
        || [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then
      provider_found="${provider_name}"
      # Stop after finding the first viable provider
      break
    fi
  done

  if [ -z "${provider_found}" ]; then
    if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then
      echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider."
      echo "Possible reasons could be: "
      echo -e "\t- vmrun utility is not in your path"
      echo -e "\t- Vagrant plugin was not found."
      echo -e "\t- VAGRANT_DEFAULT_PROVIDER is set, but not found."
      echo "Please fix and retry."
    else
      echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry."
    fi

    exit 1
  fi

  # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
  # matter what directory the tools are called from.
  export VAGRANT_CWD="${KUBE_ROOT}"

  export USING_KUBE_SCRIPTS=true
}

# Create a set of provision scripts for the master and each of the nodes
function create-provision-scripts {
  kube::util::ensure-temp-dir

  (
    echo "#! /bin/bash"
    echo-kube-env
    echo "NODE_IP='${MASTER_IP}'"
    echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    (
      echo "#! /bin/bash"
      echo-kube-env
      echo "NODE_NAME=(${NODE_NAMES[$i]})"
      echo "NODE_IP='${NODE_IPS[$i]}'"
      echo "NODE_ID='$i'"
      echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
      echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh"
      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh"
    ) > "${KUBE_TEMP}/node-start-${i}.sh"
  done
}

function echo-kube-env() {
  echo "KUBE_ROOT=/vagrant"
  echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
  echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
  echo "MASTER_IP='${MASTER_IP}'"
  echo "NODE_NAMES=(${NODE_NAMES[@]})"
  echo "NODE_IPS=(${NODE_IPS[@]})"
  echo "DEFAULT_NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}"
  echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
  echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'"
  echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
  echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
  echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
  echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
  echo "MASTER_USER='${MASTER_USER}'"
  echo "MASTER_PASSWD='${MASTER_PASSWD}'"
  echo "KUBE_USER='${KUBE_USER}'"
  echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
  echo "KUBE_BEARER_TOKEN='${KUBE_BEARER_TOKEN}'"
  echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
  echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
  echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'"
  echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
  echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
  echo "ENABLE_HOSTPATH_PROVISIONER='${ENABLE_HOSTPATH_PROVISIONER:-false}'"
  echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
  echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
  echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
  echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
  echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
  echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
  echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
  echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
  echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
  echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
  echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
  echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
  echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'"
  echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'"
  echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
  echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
  echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
  echo "CUSTOM_FEDORA_REPOSITORY_URL='${CUSTOM_FEDORA_REPOSITORY_URL:-}'"
  echo "EVICTION_HARD='${EVICTION_HARD:-}'"
}

function verify-cluster {
  # TODO: How does the user know the difference between "tak[ing] some
  # time" and "loop[ing] forever"? Can we give more specific feedback on
  # whether "an error" has occurred?
  echo "Each machine instance has been created/updated."
  echo "  Now waiting for the Salt provisioning process to complete on each machine."
  echo "  This can take some time based on your network, disk, and cpu speed."
  echo "  It is possible for an error to occur during Salt provision of cluster and this could loop forever."

  # verify master has all required daemons
  echo "Validating master"
  local machine="master"
  local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker")
  local validated="1"
  until [[ "$validated" == "0" ]]; do
    validated="0"
    for process in "${required_processes[@]}"; do
      vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
        printf "."
        validated="1"
        sleep 2
      }
    done
  done

  # verify each node has all required daemons
  local i
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
    local machine=${VAGRANT_NODE_NAMES[$i]}
    local -a required_processes=("kube-proxy" "kubelet" "docker")
    local validated="1"
    until [[ "${validated}" == "0" ]]; do
      validated="0"
      for process in "${required_processes[@]}"; do
        vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
          printf "."
          validated="1"
          sleep 2
        }
      done
    done
  done

  echo
  echo "Waiting for each node to be registered with cloud provider"
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    local validated="0"
    start="$(date +%s)"
    until [[ "$validated" == "1" ]]; do
      now="$(date +%s)"
      # Timeout set to 3 minutes
      if [ $((now - start)) -gt 180 ]; then
        echo "Timeout while waiting for echo node to be registered with cloud provider"
        exit 2
      fi
      local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name)
      validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || {
        printf "."
        sleep 2
        validated="0"
      }
    done
  done

  # By this time, all kube api calls should work, so no need to loop and retry.
  echo "Validating we can run kubectl commands."
  vagrant ssh master --command "kubectl get pods" || {
    echo "WARNING: kubectl to localhost failed. This could mean localhost is not bound to an IP"
  }

  (
    # ensures KUBECONFIG is set
    get-kubeconfig-basicauth
    get-kubeconfig-bearertoken
    echo
    echo "Kubernetes cluster is running."
    echo
    echo "The master is running at:"
    echo
    echo "  https://${MASTER_IP}"
    echo
    echo "Administer and visualize its resources using Cockpit:"
    echo
    echo "  https://${MASTER_IP}:9090"
    echo
    echo "For more information on Cockpit, visit http://cockpit-project.org"
    echo
    echo "The user name and password to use is located in ${KUBECONFIG}"
    echo
  )
}

# Instantiate a kubernetes cluster
function kube-up {
  load-or-gen-kube-basicauth
  load-or-gen-kube-bearertoken
  get-tokens
  create-provision-scripts

  vagrant up --no-parallel

  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="vagrant"

  (
    umask 077
    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

    # Update the user's kubeconfig to include credentials for this apiserver.
    create-kubeconfig
  )

  verify-cluster
}

# Delete a kubernetes cluster
function kube-down {
  vagrant destroy -f
}

# Update a kubernetes cluster with latest source
function kube-push {
  get-kubeconfig-basicauth
  get-kubeconfig-bearertoken
  create-provision-scripts
  vagrant provision
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  "${KUBE_ROOT}/cluster/kube-up.sh"
  echo "Vagrant test setup complete" 1>&2
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  kube-down
}

# Find the node name based on the IP address
function find-vagrant-name-by-ip {
  local ip="$1"
  local ip_pattern="${NODE_IP_BASE}(.*)"

  # This is subtle. We map 10.245.2.2 -> node-1. We do this by matching a
  # regexp and using the capture to construct the name.
  [[ $ip =~ $ip_pattern ]] || {
    return 1
  }

  echo "node-$((${BASH_REMATCH[1]} - 1))"
}

# Find the vagrant machine name based on the host name of the node
function find-vagrant-name-by-node-name {
  local ip="$1"
  if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
    echo "master"
    return $?
  fi
  local ip_pattern="${INSTANCE_PREFIX}-node-(.*)"

  [[ $ip =~ $ip_pattern ]] || {
    return 1
  }

  echo "node-${BASH_REMATCH[1]}"
}


# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  local machine

  machine=$(find-vagrant-name-by-ip $node) || true
  [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true
  [[ -n ${machine-} ]] || {
    echo "Cannot find machine to ssh to: $1"
    return 1
  }

  vagrant ssh "${machine}" -c "${cmd}"
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
}

function get-tokens() {
  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
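Usage sketch: ssh-to-node accepts either a node host name or an IP and resolves it to a Vagrant machine name before running the command; the example assumes the default INSTANCE_PREFIX of "kubernetes" from config-default.sh:

# Host-name form, resolved via find-vagrant-name-by-node-name.
ssh-to-node "kubernetes-node-1" "sudo systemctl status kubelet"
# An IP argument is resolved through find-vagrant-name-by-ip instead.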