vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/cluster/gce/BUILD generated vendored Normal file

@@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")

pkg_tar(
    name = "gci-trusty-manifests",
    files = [
        "container-linux/configure-helper.sh",
        "gci/configure-helper.sh",
        "gci/health-monitor.sh",
        "//cluster/gce/gci/mounter",
    ],
    mode = "0755",
    strip_prefix = ".",
    # pkg_tar doesn't support renaming the files we add, so instead create symlinks.
    symlinks = {
        "container-linux-configure-helper.sh": "container-linux/configure-helper.sh",
        "gci-configure-helper.sh": "gci/configure-helper.sh",
        "health-monitor.sh": "gci/health-monitor.sh",
        "gci-mounter": "gci/mounter/mounter",
        "trusty-configure-helper.sh": "trusty/configure-helper.sh",
    },
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cluster/gce/addons:all-srcs",
        "//cluster/gce/gci/mounter:all-srcs",
    ],
    tags = ["automanaged"],
)

# Having the configure-vm.sh script and trusty code from the GCE cluster
# deploy hosted with the release is useful for GKE.
# This list should match the list in kubernetes/release/lib/releaselib.sh.
release_filegroup(
    name = "gcs-release-artifacts",
    srcs = [
        "configure-vm.sh",
        "gci/configure.sh",
        "gci/master.yaml",
        "gci/node.yaml",
    ],
)

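A minimal usage sketch (hypothetical invocation, assuming a Bazel-configured Kubernetes checkout): the gci-trusty-manifests tarball defined above would be built with

bazel build //cluster/gce:gci-trusty-manifests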
vendor/k8s.io/kubernetes/cluster/gce/OWNERS generated vendored Normal file

@@ -0,0 +1,10 @@
reviewers:
- bowei
- gmarek
- jszczepkowski
- vishh
approvers:
- bowei
- gmarek
- jszczepkowski
- vishh

vendor/k8s.io/kubernetes/cluster/gce/addons/BUILD generated vendored Normal file

@@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")

filegroup(
    name = "addon-srcs",
    srcs = glob(
        [
            "**/*.json",
            "**/*.yaml",
            "**/*.yaml.in",
        ],
        exclude = ["**/*demo*/**"],
    ),
)

pkg_tar(
    name = "addons",
    extension = "tar.gz",
    files = [
        ":addon-srcs",
    ],
    mode = "0644",
    strip_prefix = ".",
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)


@@ -0,0 +1,7 @@
# GCE Cluster addons
These cluster add-ons are specific to GCE and GKE clusters. The GCE-specific addon directory is
merged with the general cluster addon directory at release, so addon paths (relative to the addon
directory) must be unique across the two directory structures.
More details on addons in general can be found [here](../../addons/README.md).


@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:kube-proxy
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system

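A hypothetical spot check of what a binding like the one above grants, run against a live cluster:

kubectl auth can-i use podsecuritypolicies/gce.privileged --as=system:serviceaccount:kube-system:kube-proxy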

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:unprivileged-addon
subjects:
- kind: Group
  # All service accounts in the kube-system namespace are allowed to use this.
  name: system:serviceaccounts:kube-system
  apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,24 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:nodes
  namespace: kube-system
  annotations:
    kubernetes.io/description: 'Allow nodes to create privileged pods. Should
      be used in combination with the NodeRestriction admission plugin to limit
      nodes to mirror pods bound to themselves.'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: 'true'
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
- kind: User
  apiGroup: rbac.authorization.k8s.io
  # Legacy node ID
  name: kubelet


@@ -0,0 +1,18 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRoleBinding.
kind: ClusterRoleBinding
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:persistent-volume-binder
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system


@@ -0,0 +1,20 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRole.
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  namespace: default
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.persistent-volume-binder
  resources:
  - podsecuritypolicies
  verbs:
  - use


@@ -0,0 +1,29 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.persistent-volume-binder
  annotations:
    kubernetes.io/description: 'Policy used by the persistent-volume-binder
      (a.k.a. persistentvolume-controller) to run recycler pods.'
    # TODO: This should use the default seccomp profile.
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  volumes:
  - 'nfs'
  - 'secret' # Required for service account credentials.
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false


@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use


@@ -0,0 +1,33 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false


@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.unprivileged-addon
  resources:
  - podsecuritypolicies
  verbs:
  - use


@@ -0,0 +1,38 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.unprivileged-addon
  annotations:
    kubernetes.io/description: 'This policy grants the minimum amount of
      privilege necessary to run non-privileged kube-system pods. This policy is
      not intended for use outside of kube-system, and may include further
      restrictions in the future.'
    # TODO: Addons should use the default seccomp profile.
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'emptyDir'
  - 'configMap'
  - 'secret'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  # TODO: The addons using this profile should not run as root.
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

vendor/k8s.io/kubernetes/cluster/gce/config-common.sh generated vendored Normal file

@@ -0,0 +1,101 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Vars assumed:
# NUM_NODES
function get-master-size {
local suggested_master_size=1
if [[ "${NUM_NODES}" -gt "5" ]]; then
suggested_master_size=2
fi
if [[ "${NUM_NODES}" -gt "10" ]]; then
suggested_master_size=4
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
suggested_master_size=8
fi
if [[ "${NUM_NODES}" -gt "250" ]]; then
suggested_master_size=16
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
suggested_master_size=32
fi
if [[ "${NUM_NODES}" -gt "3000" ]]; then
suggested_master_size=64
fi
echo "${suggested_master_size}"
}
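# Illustrative spot checks of the thresholds above (after sourcing this file;
# the expected outputs follow directly from the if-chain):
#   NUM_NODES=3    -> get-master-size prints 1
#   NUM_NODES=150  -> get-master-size prints 8
#   NUM_NODES=5000 -> get-master-size prints 64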
# Vars assumed:
# NUM_NODES
function get-master-root-disk-size() {
local suggested_master_root_disk_size="20GB"
if [[ "${NUM_NODES}" -gt "1000" ]]; then
suggested_master_root_disk_size="50GB"
fi
if [[ "${NUM_NODES}" -gt "2000" ]]; then
suggested_master_root_disk_size="100GB"
fi
echo "${suggested_master_root_disk_size}"
}
# Vars assumed:
# NUM_NODES
function get-master-disk-size() {
local suggested_master_disk_size="20GB"
if [[ "${NUM_NODES}" -gt "1000" ]]; then
suggested_master_disk_size="100GB"
fi
if [[ "${NUM_NODES}" -gt "2000" ]]; then
suggested_master_disk_size="200GB"
fi
echo "${suggested_master_disk_size}"
}
function get-node-ip-range {
if [[ -n "${NODE_IP_RANGE:-}" ]]; then
>&2 echo "Using user provided NODE_IP_RANGE: ${NODE_IP_RANGE}"
echo "${NODE_IP_RANGE}"
return
fi
local suggested_range="10.40.0.0/22"
if [[ "${NUM_NODES}" -gt 1000 ]]; then
suggested_range="10.40.0.0/21"
fi
if [[ "${NUM_NODES}" -gt 2000 ]]; then
suggested_range="10.40.0.0/20"
fi
if [[ "${NUM_NODES}" -gt 4000 ]]; then
suggested_range="10.40.0.0/19"
fi
echo "${suggested_range}"
}
function get-cluster-ip-range {
local suggested_range="10.64.0.0/14"
if [[ "${NUM_NODES}" -gt 1000 ]]; then
suggested_range="10.64.0.0/13"
fi
if [[ "${NUM_NODES}" -gt 2000 ]]; then
suggested_range="10.64.0.0/12"
fi
if [[ "${NUM_NODES}" -gt 4000 ]]; then
suggested_range="10.64.0.0/11"
fi
echo "${suggested_range}"
}
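# Example of how the two range helpers scale together (values read straight
# off the thresholds above): with NUM_NODES=2500, get-node-ip-range prints
# 10.40.0.0/20 (unless NODE_IP_RANGE overrides it) and get-cluster-ip-range
# prints 10.64.0.0/12.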
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"

vendor/k8s.io/kubernetes/cluster/gce/config-default.sh generated vendored Executable file

@@ -0,0 +1,402 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint must point to the v1 API. For example, https://www.googleapis.com/compute/staging_v1/
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices.
# The format is "#,scsi/nvme,block/fs"; multiple configurations can be specified
# by separating them with a semicolon, e.g. "2,scsi,fs;1,nvme,block" requests
# 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
# More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
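# For example (hypothetical values; any GCE accelerator type fits the format
# above):
#   NODE_ACCELERATORS="type=nvidia-tesla-k80,count=2"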
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
NODE_ACCELERATORS=""
fi
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on the master instance, separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on each node instance, separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using 'default' network.
if [[ "${NETWORK}" == "default" ]]; then
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
else
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
fi
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to set up as part of the cluster bring-up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to
# disable it in case of stability issues.
# TODO(piosz) remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to set up as part of the cluster bring-up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# The metadata agent is a DaemonSet that provides metadata about the Kubernetes
# objects running on the same node, for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.13-5-watch}"
# If specified, one special node out of NUM_NODES will be created with this
# machine type. Useful for scheduling Heapster in large clusters whose regular
# nodes are small.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Historically fluentd was a manifest pod and was later migrated to a DaemonSet.
# To avoid a situation during a cluster upgrade where two instances of fluentd
# run on the same node, the kubelet needs to mark each node on which fluentd is
# not running as a manifest pod with an appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
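# Note: the ${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},} expansion
# above expands to the existing labels plus a trailing comma only when the
# variable is already non-empty, so the resulting list never starts with a
# stray comma.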
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be set up as part of the cluster bring-up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
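# For example, with NODE_ACCELERATORS="type=nvidia-tesla-p100,count=4"
# (hypothetical value), BASH_REMATCH[1] captures "nvidia-tesla-p100" and the
# label cloud.google.com/gke-accelerator=nvidia-tesla-p100 is appended to
# NODE_LABELS.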
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_PD:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as a DaemonSet.
# standalone - Run node problem detector as a standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
# Enable standalone mode by default for gci.
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. Currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,PVCProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# ResourceQuota must come last, or a creation may be recorded even though the pod was forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Storage backend. 'etcd2' supported, 'etcd3' experimental.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-false}"
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
# Optional: enable pod priority
ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}"
if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"

vendor/k8s.io/kubernetes/cluster/gce/config-test.sh generated vendored Executable file

@@ -0,0 +1,436 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint must point to the v1 API. For example, https://www.googleapis.com/compute/staging_v1/
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
NODE_ACCELERATORS=""
fi
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on the master instance, separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on each node instance, separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
NETWORK=${KUBE_GCE_NETWORK:-e2e-test-${USER}}
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
# Enable the docker debug mode.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --debug"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to set up as part of the cluster bring-up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to
# disable it in case of stability issues.
# TODO(piosz) remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# If specified, one special node out of NUM_NODES will be created with this
# machine type. Useful for scheduling Heapster in large clusters whose regular
# nodes are small.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10)
# if you need a non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
ETCD_VERSION="${TEST_ETCD_VERSION:-}"
# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
DOCKER_TEST_LOG_LEVEL="${DOCKER_TEST_LOG_LEVEL:---log-level=info}"
API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
# TODO: change this and flex e2e test when default flex volume install path is changed for GCI
# Set flex dir to one that's readable from controller-manager container and writable by the flex e2e test.
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR="--flex-volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
# Set flex dir to one that's readable from kubelet and writable by the flex e2e test.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || ([[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] && [[ "${REGISTER_MASTER_KUBELET}" == "false" ]]); then
KUBELET_TEST_VOLUME_PLUGIN_DIR="--volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}"
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBELET_TEST_VOLUME_PLUGIN_DIR:-}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR:-}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# Historically fluentd was a manifest pod and was later migrated to a DaemonSet.
# To avoid a situation during a cluster upgrade where two instances of fluentd
# run on the same node, the kubelet needs to mark each node on which fluentd is
# not running as a manifest pod with an appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-true}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be set up as part of the cluster bring-up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as a DaemonSet.
# standalone - Run node problem detector as a standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
# Enable standalone mode by default for gci.
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-false}"
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. gcloud currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# ResourceQuota must come last, or a creation may be recorded even though the pod was forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
else
ADMISSION_CONTROL=${KUBE_ADMISSION_CONTROL}
fi
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Optional: setting it to true denotes this is a testing cluster,
# so that we can use pulled kubernetes binaries, even if binaries
# are pre-installed in the image. Note that currently this logic
# is only supported in trusty or GCI.
TEST_CLUSTER="${TEST_CLUSTER:-true}"
# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, an image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
# Disabling this by default in tests ensures default RBAC policies are sufficient from 1.6+
# Upgrade test jobs that go from a version < 1.6 to a version >= 1.6 should override this to be true.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-true}" # true, false
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-true}"
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
# Optional: enable pod priority
ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}"
if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"

vendor/k8s.io/kubernetes/cluster/gce/configure-vm.sh generated vendored Executable file

@@ -0,0 +1,899 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# If we have any arguments at all, this is a push and not just setup.
is_push=$@
function ensure-basic-networking() {
# Deal with GCE networking bring-up race. (We rely on DNS for a lot,
# and it's just not worth doing a whole lot of startup work if this
# isn't ready yet.)
until getent hosts metadata.google.internal &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...'
sleep 3
done
until getent hosts $(hostname -f || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own FQDN)...'
sleep 3
done
until getent hosts $(hostname -i || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own IP)...'
sleep 3
done
echo "Networking functional on $(hostname) ($(hostname -i))"
}
# A hookpoint for installing any needed packages
ensure-packages() {
:
}
function create-node-pki {
echo "Creating node pki files"
local -r pki_dir="/etc/kubernetes/pki"
mkdir -p "${pki_dir}"
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
CA_CERT_BUNDLE="${CA_CERT}"
fi
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}"
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}"
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}"
fi
}
# A hookpoint for setting up local devices
ensure-local-disks() {
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "$ssd" ]; then
ssdnum=`echo $ssd | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum"
mkdir -p /mnt/disks/ssd$ssdnum
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \
{ echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; }
else
echo "No local SSD disks found."
fi
done
}
function config-ip-firewall {
echo "Configuring IP firewall rules"
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
echo "Add rule for metadata concealment"
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
fi
}
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
function salt-apiserver-timeout-grain() {
cat <<EOF >>/etc/salt/minion.d/grains.conf
minRequestTimeout: '$1'
EOF
}
function set-broken-motd() {
echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}
function reset-motd() {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
gitref="${version//*+/}"
fi
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/usr/local/share/doc/kubernetes/LICENSES
EOF
}
function curl-metadata() {
curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
}
function set-kube-env() {
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
until curl-metadata kube-env > "${kube_env_yaml}"; do
echo 'Waiting for kube-env...'
sleep 3
done
# kube-env has all the environment variables we care about, in a flat yaml format
eval "$(python -c '
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
print("""export {var}""".format(var = k))
' < """${kube_env_yaml}""")"
}
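# Illustrative sketch (hypothetical kube-env line): a metadata entry such as
#   KUBERNETES_MASTER_NAME: kubernetes-master
# is turned by the eval above into the equivalent of:
#   readonly KUBERNETES_MASTER_NAME=kubernetes-master
#   export KUBERNETES_MASTER_NAME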
function remove-docker-artifacts() {
echo "== Deleting docker0 =="
apt-get-install bridge-utils
# Remove docker artifacts on minion nodes, if present
iptables -t nat -F || true
ifconfig docker0 down || true
brctl delbr docker0 || true
echo "== Finished deleting docker0 =="
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
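# Illustrative usage (the URL and sha1 below are placeholders, not a real release
# artifact):
#   download-or-bust "da39a3ee5e6b4b0d3255bfef95601890afd80709" \
#     "https://storage.example.com/kubernetes-server-linux-amd64.tar.gz"
# Passing "" as the hash downloads without checksum verification.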
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
apt-get-install() {
  local -r packages=( "$@" )
  installed=true
  for package in "${packages[@]}"; do
    if ! dpkg -s "${package}" &>/dev/null; then
      installed=false
      break
    fi
  done
  if [[ "${installed}" == "true" ]]; then
    echo "== ${packages[*]} already installed, skipped apt-get install ${packages[*]} =="
    return
  fi
  apt-get-update
  # Forcibly install packages (options borrowed from Salt logs).
  until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install "$@"; do
    echo "== install of packages $* failed, retrying =="
    sleep 5
  done
}
apt-get-update() {
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
}
# Restart any services that need restarting due to a library upgrade
# Uses needrestart
restart-updated-services() {
# We default to restarting services, because this is only done as part of an update
if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then
echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}"
return
fi
echo "Restarting services with updated libraries (needrestart -r a)"
# The pipes make sure that needrestart doesn't think it is running with a TTY
# Debian bug #803249; fixed but not necessarily in package repos yet
echo "" | needrestart -r a 2>&1 | tee /dev/null
}
# Reboot the machine if /var/run/reboot-required exists
reboot-if-required() {
if [[ ! -e "/var/run/reboot-required" ]]; then
return
fi
echo "Reboot is required (/var/run/reboot-required detected)"
if [[ -e "/var/run/reboot-required.pkgs" ]]; then
echo "Packages that triggered reboot:"
cat /var/run/reboot-required.pkgs
fi
# We default to rebooting the machine because this is only done as part of an update
if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then
echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}"
return
fi
rm -f /var/run/reboot-required
rm -f /var/run/reboot-required.pkgs
echo "Triggering reboot"
init 6
}
# Install upgrades using unattended-upgrades, then reboot or restart services
auto-upgrade() {
# We default to not installing upgrades
if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then
echo "AUTO_UPGRADE not set to true; won't auto-upgrade"
return
fi
apt-get-install unattended-upgrades needrestart
unattended-upgrade --debug
reboot-if-required # We may reboot the machine right here
restart-updated-services
}
#
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
if dpkg -s salt-minion &>/dev/null; then
echo "== SaltStack already installed, skipping install step =="
return
fi
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
DEBS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
)
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for deb in "${DEBS[@]}"; do
if [ ! -e "${deb}" ]; then
download-or-bust "" "${URL_BASE}/${deb}"
fi
done
# Based on
# https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
# We do this to prevent Salt from starting the salt-minion
# daemon. The other packages don't have relevant daemons. (If you
# add a package that needs a daemon started, add it to a different
# list.)
cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
echo "Salt shall not start." >&2
exit 101
EOF
chmod 0755 /usr/sbin/policy-rc.d
for deb in "${DEBS[@]}"; do
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
dpkg --skip-same-version --force-depends -i "${deb}"
done
# This will install any of the unmet dependencies from above.
echo "== Installing unmet dependencies =="
until apt-get install -f -y; do
echo "== apt-get install failed, retrying =="
sleep 5
done
rm /usr/sbin/policy-rc.d
# Log a timestamp
echo "== Finished installing Salt =="
}
# Ensure salt-minion isn't running and never runs
stop-salt-minion() {
if [[ -e /etc/init/salt-minion.override ]]; then
# Assume this has already run (upgrade, or baked into containervm)
return
fi
# This ensures it on next reboot
echo manual > /etc/init/salt-minion.override
update-rc.d salt-minion disable
while service salt-minion status >/dev/null; do
echo "salt-minion found running, stopping"
service salt-minion stop
sleep 1
done
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
find-master-pd() {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
function create-salt-pillar() {
# Always overwrite the cluster-params.sls (even on a push, we have
# these variables)
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")'
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")'
enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")'
enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")'
enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")'
enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
cluster_dns_core_dns: '$(echo "$CLUSTER_DNS_CORE_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")'
hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")'
softlockup_panic: '$(echo "$SOFTLOCKUP_PANIC" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")'
enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")'
initial_etcd_cluster_state: '$(echo "${INITIAL_ETCD_CLUSTER_STATE:-}" | sed -e "s/'/''/g")'
ca_cert_bundle_path: '$(echo "${CA_CERT_BUNDLE_PATH:-}" | sed -e "s/'/''/g")'
hostname: '$(echo "${ETCD_HOSTNAME:-$(hostname -s)}" | sed -e "s/'/''/g")'
enable_pod_priority: '$(echo "${ENABLE_POD_PRIORITY:-}" | sed -e "s/'/''/g")'
enable_default_storage_class: '$(echo "$ENABLE_DEFAULT_STORAGE_CLASS" | sed -e "s/'/''/g")'
kube_proxy_daemonset: '$(echo "$KUBE_PROXY_DAEMONSET" | sed -e "s/'/''/g")'
EOF
if [ -n "${STORAGE_BACKEND:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
admission-control-config-file: /etc/admission_controller.config
EOF
fi
if [ -n "${KUBELET_PORT:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_IMAGE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_tag: '$(echo "$ETCD_IMAGE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_repository: '$(echo "$ETCD_DOCKER_REPOSITORY" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_VERSION:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_version: '$(echo "$ETCD_VERSION" | sed -e "s/'/''/g")'
EOF
fi
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_over_ssl: 'true'
EOF
else
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_over_ssl: 'false'
EOF
fi
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_quorum_read: '$(echo "${ETCD_QUORUM_READ}" | sed -e "s/'/''/g")'
EOF
fi
# Configuration changes for test clusters
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
# TODO: Replace this with a persistent volume (and create it).
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
cluster_registry_disk_type: gce
cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g")
cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g")
EOF
fi
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NODE_LABELS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
non_master_node_labels: '$(echo "${NON_MASTER_NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NODE_TAINTS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_taints: '$(echo "${NODE_TAINTS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${EVICTION_HARD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
fi
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")'
autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")'
autoscaler_expander_config: '$(echo "${AUTOSCALER_EXPANDER_CONFIG}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_IP_ALIASES:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")'
EOF
fi
}
# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
# 1. Convert from [0-9]+X?i?B into [0-9]+X? (X denotes the prefix, and ? means
#    the field is optional).
# 2. Attach an 'i' to the end of the string if we find a letter.
# The two step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
function convert-bytes-gce-kube() {
local -r storage_space=$1
echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}
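# Illustrative conversions (hypothetical inputs):
#   convert-bytes-gce-kube "5B"     # -> 5     (raw byte count stays a bare number)
#   convert-bytes-gce-kube "10KB"   # -> 10Ki
#   convert-bytes-gce-kube "200GiB" # -> 200Gi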
# This should happen both on cluster initialization and node upgrades.
#
# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
# connect to the apiserver.
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate: ${KUBELET_CERT_PATH}
    client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
  cluster:
    server: https://${KUBERNETES_MASTER_NAME}
    certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
)
fi
}
# This should happen both on cluster initialization and node upgrades.
#
# - When run as static pods, use the CA_CERT and KUBE_PROXY_TOKEN to generate a
# kubeconfig file for the kube-proxy to securely connect to the apiserver.
function create-salt-kubeproxy-auth() {
local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-proxy
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
)
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
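# e.g. split-commas "https://a.example/x.tar.gz,https://b.example/x.tar.gz"
# prints one (hypothetical) URL per line, suitable for array assignment.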
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar (${server_binary_tar_urls[@]})"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
local -r salt_tar_urls=( $(split-commas "${SALT_TAR_URL}") )
local -r salt_tar="${salt_tar_urls[0]##*/}"
if [[ -n "${SALT_TAR_HASH:-}" ]]; then
local -r salt_tar_hash="${SALT_TAR_HASH}"
else
echo "Downloading Salt tar sha1 (not found in env)"
download-or-bust "" "${salt_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r salt_tar_hash=$(cat "${salt_tar}.sha1")
fi
echo "Downloading Salt tar (${salt_tar_urls[@]})"
download-or-bust "${salt_tar_hash}" "${salt_tar_urls[@]}"
echo "Unpacking Salt tree and checking integrity of binary release tar"
rm -rf kubernetes
tar xzf "${salt_tar}" && tar tzf "${server_binary_tar}" > /dev/null
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running release install script"
kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
}
function fix-apt-sources() {
sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
}
function salt-run-local() {
cat <<EOF >/etc/salt/minion.d/local.conf
file_client: local
file_roots:
  base:
    - /srv/salt
EOF
}
function salt-debug-log() {
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
}
function salt-node-role() {
local -r kubelet_bootstrap_kubeconfig="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
local -r kubelet_kubeconfig="/srv/salt-overlay/salt/kubelet/kubeconfig"
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cloud: gce
  api_servers: '${KUBERNETES_MASTER_NAME}'
  kubelet_bootstrap_kubeconfig: /var/lib/kubelet/bootstrap-kubeconfig
  kubelet_kubeconfig: /var/lib/kubelet/kubeconfig
EOF
}
function env-to-grains {
local key=$1
local env_key=$(echo "${key}" | tr '[:lower:]' '[:upper:]')
local value=${!env_key:-}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
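# Illustrative: with DOCKER_OPTS="--log-level=warn" exported (hypothetical value),
#   env-to-grains "docker_opts"
# appends the line "docker_opts: '--log-level=warn'" to /etc/salt/minion.d/grains.conf.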
function node-docker-opts() {
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}"
fi
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
fi
}
function salt-grains() {
env-to-grains "docker_opts"
env-to-grains "docker_root"
env-to-grains "kubelet_root"
env-to-grains "feature_gates"
}
function configure-salt() {
mkdir -p /etc/salt/minion.d
salt-run-local
salt-node-role
node-docker-opts
salt-grains
install-salt
stop-salt-minion
}
function run-salt() {
echo "== Calling Salt =="
local rc=0
for i in {0..6}; do
salt-call --retcode-passthrough --local state.highstate && rc=0 || rc=$?
if [[ "${rc}" == 0 ]]; then
return 0
fi
done
echo "Salt failed to run repeatedly" >&2
return "${rc}"
}
function run-user-script() {
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
fi
if [[ -n "${user_script:-}" ]]; then
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
echo "== running user startup script =="
"${INSTALL_DIR}/k8s-user-script.sh"
fi
}
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
echo "Support for debian master has been removed"
exit 1
fi
if [[ -z "${is_push}" ]]; then
echo "== kube-up node config starting =="
set-broken-motd
ensure-basic-networking
fix-apt-sources
ensure-install-dir
ensure-packages
set-kube-env
auto-upgrade
ensure-local-disks
create-node-pki
create-salt-pillar
create-salt-kubelet-auth
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-salt-kubeproxy-auth
fi
download-release
configure-salt
remove-docker-artifacts
config-ip-firewall
run-salt
reset-motd
run-user-script
echo "== kube-up node config done =="
else
echo "== kube-push node config starting =="
ensure-basic-networking
ensure-install-dir
set-kube-env
create-salt-pillar
download-release
reset-motd
run-salt
echo "== kube-push node config done =="
fi


@ -0,0 +1,8 @@
approvers:
- euank
- yifan-gu
- ethernetdan
reviewers:
- euank
- yifan-gu
- ethernetdan


@ -0,0 +1,8 @@
# Container Linux image
The [Container Linux Operating System](https://coreos.com/why/) is a Linux distribution optimized for running containers securely at scale.
CoreOS provides [a Container Linux image](https://coreos.com/os/docs/latest/booting-on-google-compute-engine.html) for Google Cloud Platform (GCP).
This folder contains configuration and tooling to allow kube-up to create a Kubernetes cluster on Google Cloud Platform running on the official Container Linux image.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/gce/container-linux/README.md?pixel)]()

File diff suppressed because it is too large.


@ -0,0 +1,182 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function download-kube-env {
# Fetch kube-env from GCE metadata server.
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
sed 's/: /=/' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env"
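# e.g. a (hypothetical) kube-env line "KUBERNETES_MASTER: 'true'" becomes
# "KUBERNETES_MASTER='true'", which can later be sourced as shell.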
rm -f "${tmp_kube_env}"
}
function validate-hash {
local -r file="$1"
local -r expected="$2"
actual=$(sha1sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
function download-or-bust {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
function split-commas {
echo $1 | tr "," "\n"
}
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
# and places them into suitable directories. Files are placed in /opt/kubernetes.
function install-kube-binary-config {
cd "${KUBE_HOME}"
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
src_dir="${KUBE_HOME}/kubernetes/server/bin"
dst_dir="${KUBE_HOME}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
fi
local -r kube_bin="${KUBE_HOME}/bin"
mv "${src_dir}/kubelet" "${kube_bin}"
mv "${src_dir}/kubectl" "${kube_bin}"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
local -r cni_version="v0.6.0"
local -r cni_tar="cni-plugins-amd64-${cni_version}.tgz"
local -r cni_sha1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
local -r cni_dir="${KUBE_HOME}/cni"
mkdir -p "${cni_dir}/bin"
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite
mv "${cni_dir}/bin"/* "${kube_bin}"
rmdir "${cni_dir}/bin"
rm -f "${KUBE_HOME}/${cni_tar}"
fi
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
dst_dir="${KUBE_HOME}/kube-manifests"
mkdir -p "${dst_dir}"
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh"
chmod -R 755 "${kube_bin}"
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
rm -f "${KUBE_HOME}/${manifests_tar}"
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
KUBE_HOME="/opt/kubernetes"
mkdir -p "${KUBE_HOME}"
download-kube-env
source "${KUBE_HOME}/kube-env"
install-kube-binary-config
echo "Done for installing kubernetes files"
# On Container Linux, the hosts is in /usr/share/baselayout/hosts
# So we need to manually populdate the hosts file here on gce.
echo "127.0.0.1 localhost" >> /etc/hosts
echo "::1 localhost" >> /etc/hosts
echo "Configuring hostname"
hostnamectl set-hostname "$(hostname | cut -f1 -d.)"


@ -0,0 +1,83 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for master and node instance health monitoring, which is
# packed in kube-manifest tarball. It is executed through a systemd service
# in cluster/gce/gci/<master/node>.yaml. The env variables come from an env
# file provided by the systemd service.
set -o nounset
set -o pipefail
# We simply kill the process when there is a failure. Another systemd service will
# automatically restart the process.
function docker_monitoring {
while true; do
if ! timeout 60 docker ps > /dev/null; then
echo "Docker daemon failed!"
pkill docker
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 30
else
sleep "${SLEEP_SECONDS}"
fi
done
}
function kubelet_monitoring {
echo "Wait for 2 minutes for kubelet to be fuctional"
# TODO(andyzheng0831): replace it with a more reliable method if possible.
sleep 120
local -r max_seconds=10
local output=""
while true; do
if ! output=$(curl --insecure -m "${max_seconds}" -f -s -S https://127.0.0.1:${KUBELET_PORT:-10250}/healthz 2>&1); then
# Print the response and/or errors.
echo "${output}"
echo "Kubelet is unhealthy!"
pkill kubelet
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 60
else
sleep "${SLEEP_SECONDS}"
fi
done
}
############## Main Function ################
if [[ "$#" -ne 1 ]]; then
echo "Usage: health-monitor.sh <docker/kubelet>"
exit 1
fi
KUBE_ENV="/home/kubernetes/kube-env"
if [[ ! -e "${KUBE_ENV}" ]]; then
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
exit 1
fi
SLEEP_SECONDS=10
component=$1
echo "Start kubernetes health monitoring for ${component}"
source "${KUBE_ENV}"
if [[ "${component}" == "docker" ]]; then
docker_monitoring
elif [[ "${component}" == "kubelet" ]]; then
kubelet_monitoring
else
echo "Health monitoring for component "${component}" is not supported!"
fi


@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Container Linux distro.
# This file intentionally left blank


@ -0,0 +1,139 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Container Linux distro.
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to the
# call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
function create-master-instance {
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
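# Illustrative usage (the address name below is hypothetical):
#   create-master-instance "${MASTER_NAME}-ip"   # re-use a reserved IP on upgrade/repair
#   create-master-instance                       # no argument: no reserved address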
function replicate-master-instance() {
local existing_master_zone="${1}"
local existing_master_name="${2}"
local existing_master_replicas="${3}"
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
create-master-instance-internal "${REPLICA_NAME}"
}
function create-master-instance-internal() {
local gcloud="gcloud"
local retries=5
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
"${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
for attempt in $(seq 1 ${retries}); do
if result=$(${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \
${preemptible_master} \
${network} 2>&1); then
echo "${result}" >&2
return 0
else
echo "${result}" >&2
if [[ ! "${result}" =~ "try again later" ]]; then
echo "Failed to create master instance due to non-retryable error" >&2
return 1
fi
sleep 10
fi
done
echo "Failed to create master instance despite ${retries} attempts" >&2
return 1
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null
}
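# Illustrative: fetch kube-env from an existing replica (zone and name are
# hypothetical):
#   get-metadata "us-central1-b" "kubernetes-master" "kube-env"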


@ -0,0 +1,57 @@
#cloud-config
coreos:
  update:
    reboot-strategy: off
  units:
    - name: locksmithd.service
      mask: true
    - name: kube-master-installation.service
      command: start
      content: |
        [Unit]
        Description=Download and install k8s binaries and configurations
        After=network-online.target
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
        # Use --retry-connrefused opt only if it's supported by curl.
        ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
        ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
        ExecStart=/opt/kubernetes/bin/configure.sh
        [Install]
        WantedBy=kubernetes.target
    - name: kube-master-configuration.service
      command: start
      content: |
        [Unit]
        Description=Configure kubernetes master
        After=kube-master-installation.service
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
        ExecStart=/opt/kubernetes/bin/configure-helper.sh
        [Install]
        WantedBy=kubernetes.target
    - name: kubernetes.target
      enable: true
      command: start
      content: |
        [Unit]
        Description=Kubernetes
        [Install]
        WantedBy=multi-user.target
    - name: docker.service
      drop-ins:
        - name: "use-cgroupfs-driver.conf"
          # This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
          content: |
            [Service]
            Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="


@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Container Linux distro.
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
function get-node-instance-metadata {
local metadata=""
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml,"
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
echo "${metadata}"
}
# $1: template name (required).
function create-node-instance-template {
local template_name="$1"
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
# TODO(euank): We should include update-strategy here. We should also switch to ignition
}
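# Illustrative usage (the template name is hypothetical; scope_flags and KUBE_TEMP
# are assumed to be set by the calling kube-up code path):
#   create-node-instance-template "${NODE_INSTANCE_PREFIX}-template"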


@ -0,0 +1,57 @@
#cloud-config
coreos:
  update:
    reboot-strategy: off
  units:
    - name: locksmithd.service
      mask: true
    - name: kube-node-installation.service
      command: start
      content: |
        [Unit]
        Description=Download and install k8s binaries and configurations
        After=network-online.target
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
        # Use --retry-connrefused opt only if it's supported by curl.
        ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
        ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
        ExecStart=/opt/kubernetes/bin/configure.sh
        [Install]
        WantedBy=kubernetes.target
    - name: kube-node-configuration.service
      command: start
      content: |
        [Unit]
        Description=Configure kubernetes node
        After=kube-node-installation.service
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
        ExecStart=/opt/kubernetes/bin/configure-helper.sh
        [Install]
        WantedBy=kubernetes.target
    - name: kubernetes.target
      enable: true
      command: start
      content: |
        [Unit]
        Description=Kubernetes
        [Install]
        WantedBy=multi-user.target
    - name: docker.service
      drop-ins:
        - name: "use-cgroupfs-driver.conf"
          # This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
          content: |
            [Service]
            Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="

vendor/k8s.io/kubernetes/cluster/gce/cos generated vendored Symbolic link

@ -0,0 +1 @@
gci

vendor/k8s.io/kubernetes/cluster/gce/debian/node-helper.sh generated vendored Executable file

@ -0,0 +1,32 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Debian OS distro.
function get-node-instance-metadata {
local metadata=""
metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh,"
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
echo "${metadata}"
}
# $1: template name (required)
function create-node-instance-template {
local template_name="$1"
prepare-startup-script
create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)"
}


@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A utility for deleting target pools and forwarding rules that are unattached to VMs
PROJECT=${PROJECT:-kubernetes-jenkins}
REGION=${REGION:-us-central1}
LIST=$(gcloud --project=${PROJECT} compute target-pools list --format='value(name)')
result=0
for x in ${LIST}; do
if ! gcloud compute --project=${PROJECT} target-pools get-health "${x}" --region=${REGION} 2>/dev/null >/dev/null; then
echo DELETING "${x}"
gcloud compute --project=${PROJECT} firewall-rules delete "k8s-fw-${x}" -q
gcloud compute --project=${PROJECT} forwarding-rules delete "${x}" --region=${REGION} -q
gcloud compute --project=${PROJECT} addresses delete "${x}" --region=${REGION} -q
gcloud compute --project=${PROJECT} target-pools delete "${x}" --region=${REGION} -q
result=1
fi
done
exit ${result}

vendor/k8s.io/kubernetes/cluster/gce/gci/README.md generated vendored Normal file

@ -0,0 +1,11 @@
# Container-VM Image
[Container-VM Image](https://cloud.google.com/compute/docs/containers/vm-image/)
is a container-optimized OS image for the Google Cloud Platform (GCP). It is
primarily for running Google services on GCP. Unlike the open preview version
of container-vm, the new Container-VM Image is based on the open source
ChromiumOS project, allowing us greater control over the build management,
security compliance, and customizations for GCP.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/gce/gci/README.md?pixel)]()

File diff suppressed because it is too large.

vendor/k8s.io/kubernetes/cluster/gce/gci/configure.sh generated vendored Normal file

@ -0,0 +1,352 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Due to the GCE custom metadata size limit, we split the entire script into two
# files: configure.sh and configure-helper.sh. The functionality for downloading
# the kubernetes configuration, manifests, docker images, and binary files is
# put in configure.sh, which is uploaded via GCE custom metadata.
set -o errexit
set -o nounset
set -o pipefail
### Hardcoded constants
DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.4.1"
DEFAULT_NPD_SHA1="a57a3fe64cab8a18ec654f5cef0aec59dae62568"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function set-broken-motd {
cat > /etc/motd <<EOF
Broken (or in progress) Kubernetes node setup! Check the cluster initialization status
using the following commands.
Master instance:
- sudo systemctl status kube-master-installation
- sudo systemctl status kube-master-configuration
Node instance:
- sudo systemctl status kube-node-installation
- sudo systemctl status kube-node-configuration
EOF
}
function download-kube-env {
# Fetch kube-env from GCE metadata server.
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
  print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env")
rm -f "${tmp_kube_env}"
}
function download-kube-master-certs {
# Fetch kube-env from GCE metadata server.
local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_master_certs}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
  print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs")
rm -f "${tmp_kube_master_certs}"
}
function validate-hash {
local -r file="$1"
local -r expected="$2"
actual=$(sha1sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
function download-or-bust {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
function is-preloaded {
local -r key=$1
local -r value=$2
grep -qs "${key},${value}" "${KUBE_HOME}/preload_info"
}
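# Illustrative: is-preloaded "node-problem-detector" "${DEFAULT_NPD_SHA1}" succeeds
# only if an image-baking step wrote that "key,value" pair into ${KUBE_HOME}/preload_info.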
function split-commas {
echo $1 | tr "," "\n"
}
function install-gci-mounter-tools {
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
local -r mounter_tar_sha="${DEFAULT_MOUNTER_TAR_SHA}"
if is-preloaded "mounter" "${mounter_tar_sha}"; then
echo "mounter is preloaded."
return
fi
echo "Downloading gci mounter tools."
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}"
chmod a+x "${CONTAINERIZED_MOUNTER_HOME}"
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs"
download-or-bust "${mounter_tar_sha}" "https://storage.googleapis.com/kubernetes-release/gci-mounter/mounter.tar"
cp "${KUBE_HOME}/kubernetes/server/bin/mounter" "${CONTAINERIZED_MOUNTER_HOME}/mounter"
chmod a+x "${CONTAINERIZED_MOUNTER_HOME}/mounter"
mv "${KUBE_HOME}/mounter.tar" /tmp/mounter.tar
tar xf /tmp/mounter.tar -C "${CONTAINERIZED_MOUNTER_HOME}/rootfs"
rm /tmp/mounter.tar
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs/var/lib/kubelet"
}
# Install node problem detector binary.
function install-node-problem-detector {
if [[ -n "${NODE_PROBLEM_DETECTOR_VERSION:-}" ]]; then
local -r npd_version="${NODE_PROBLEM_DETECTOR_VERSION}"
local -r npd_sha1="${NODE_PROBLEM_DETECTOR_TAR_HASH}"
else
local -r npd_version="${DEFAULT_NPD_VERSION}"
local -r npd_sha1="${DEFAULT_NPD_SHA1}"
fi
if is-preloaded "node-problem-detector" "${npd_sha1}"; then
echo "node-problem-detector is preloaded."
return
fi
echo "Downloading node problem detector."
local -r npd_release_path="https://storage.googleapis.com/kubernetes-release"
local -r npd_tar="node-problem-detector-${npd_version}.tar.gz"
download-or-bust "${npd_sha1}" "${npd_release_path}/node-problem-detector/${npd_tar}"
local -r npd_dir="${KUBE_HOME}/node-problem-detector"
mkdir -p "${npd_dir}"
tar xzf "${KUBE_HOME}/${npd_tar}" -C "${npd_dir}" --overwrite
mv "${npd_dir}/bin"/* "${KUBE_BIN}"
chmod a+x "${KUBE_BIN}/node-problem-detector"
rmdir "${npd_dir}/bin"
rm -f "${KUBE_HOME}/${npd_tar}"
}
function install-cni-binaries {
local -r cni_tar="cni-plugins-amd64-${DEFAULT_CNI_VERSION}.tgz"
local -r cni_sha1="${DEFAULT_CNI_SHA1}"
if is-preloaded "${cni_tar}" "${cni_sha1}"; then
echo "${cni_tar} is preloaded."
return
fi
echo "Downloading cni binaries"
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
local -r cni_dir="${KUBE_HOME}/cni"
mkdir -p "${cni_dir}/bin"
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite
mv "${cni_dir}/bin"/* "${KUBE_BIN}"
rmdir "${cni_dir}/bin"
rm -f "${KUBE_HOME}/${cni_tar}"
}
function install-kube-manifests {
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
local dst_dir="${KUBE_HOME}/kube-manifests"
mkdir -p "${dst_dir}"
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
if is-preloaded "${manifests_tar}" "${manifests_tar_hash}"; then
echo "${manifests_tar} is preloaded."
return
fi
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"
rm -f "${KUBE_HOME}/${manifests_tar}"
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
local -r img=$1
echo "Try to load docker image file ${img}"
# Temporarily turn off errexit, because we don't want to exit on first failure.
set +e
local -r max_attempts=5
local -i attempt_num=1
until timeout 30 ${LOAD_IMAGE_COMMAND:-docker load -i} "${img}"; do
if [[ "${attempt_num}" == "${max_attempts}" ]]; then
echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exit!!"
exit 1
else
attempt_num=$((attempt_num+1))
sleep 5
fi
done
# Re-enable errexit.
set -e
}
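# Example usage of try-load-docker-image (mirrors the calls made by
# load-docker-images below):
#   try-load-docker-image "${KUBE_HOME}/kube-docker-files/kube-proxy.tar"
# Setting LOAD_IMAGE_COMMAND in the environment replaces the default
# "docker load -i" invocation.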
# Loads kube-system docker images. It is better to do this before starting
# kubelet, as kubelet will restart the docker daemon, which may interfere with
# loading images.
function load-docker-images {
echo "Start loading kube-system docker images"
local -r img_dir="${KUBE_HOME}/kube-docker-files"
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
try-load-docker-image "${img_dir}/kube-apiserver.tar"
try-load-docker-image "${img_dir}/kube-controller-manager.tar"
try-load-docker-image "${img_dir}/kube-scheduler.tar"
else
try-load-docker-image "${img_dir}/kube-proxy.tar"
fi
}
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
# and places them into suitable directories. Files are placed in /home/kubernetes.
function install-kube-binary-config {
cd "${KUBE_HOME}"
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
if is-preloaded "${server_binary_tar}" "${server_binary_tar_hash}"; then
echo "${server_binary_tar} is preloaded."
else
echo "Downloading binary release tar"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
local -r src_dir="${KUBE_HOME}/kubernetes/server/bin"
local dst_dir="${KUBE_HOME}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
fi
load-docker-images
mv "${src_dir}/kubelet" "${KUBE_BIN}"
mv "${src_dir}/kubectl" "${KUBE_BIN}"
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
fi
if [[ "${KUBERNETES_MASTER:-}" == "false" ]] && \
[[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
install-node-problem-detector
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
install-cni-binaries
fi
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
install-kube-manifests
chmod -R 755 "${KUBE_BIN}"
# Install gci mounter related artifacts to allow mounting storage volumes in GCI
install-gci-mounter-tools
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
set-broken-motd
KUBE_HOME="/home/kubernetes"
KUBE_BIN="${KUBE_HOME}/bin"
download-kube-env
source "${KUBE_HOME}/kube-env"
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
download-kube-master-certs
fi
install-kube-binary-config
echo "Done for installing kubernetes files"


@ -0,0 +1,184 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sets up FlexVolume drivers on GCE COS instances using mounting utilities packaged in a Google
# Container Registry image.
# The user-provided FlexVolume driver(s) must be under /flexvolume of the image filesystem.
# For example, the driver k8s/nfs must be located at /flexvolume/k8s~nfs/nfs.
#
# This script should be used on a clean instance, with no FlexVolume installed.
# It should not be run on instances with an existing full or partial installation.
# Upon failure, the script will clean up the partial installation automatically.
#
# Must be executed under /home/kubernetes/bin with sudo.
# Warning: kubelet will be restarted upon successful execution.
set -o errexit
set -o nounset
set -o pipefail
MOUNTER_IMAGE=${1:-}
MOUNTER_PATH=/home/kubernetes/flexvolume_mounter
VOLUME_PLUGIN_DIR=/etc/srv/kubernetes/kubelet-plugins/volume/exec
usage() {
echo "usage: $0 imagename[:tag]"
echo " imagename Name of a Container Registry image. By default the latest image is used."
echo " :tag Container Registry image tag."
exit 1
}
if [ -z "${MOUNTER_IMAGE}" ]; then
echo "ERROR: No Container Registry mounter image is specified."
echo
usage
fi
# Unmounts a mount point lazily. If the mount point does not exist, continues
# silently and without error.
umount_silent() {
umount -l "$1" &> /dev/null || /bin/true
}
# Waits up to 1 minute for kubelet to become healthy after a restart.
kubelet_wait() {
timeout=60
kubelet_readonly_port=10255
until [[ $timeout -eq 0 ]]; do
printf "."
if [[ $( curl -s http://localhost:${kubelet_readonly_port}/healthz ) == "ok" ]]; then
return 0
fi
sleep 1
timeout=$(( timeout-1 ))
done
# Timed out waiting for kubelet to become healthy.
return 1
}
flex_clean() {
echo
echo "An error has occurred. Cleaning up..."
echo
umount_silent ${VOLUME_PLUGIN_DIR}
rm -rf ${VOLUME_PLUGIN_DIR}
umount_silent ${MOUNTER_PATH}/var/lib/kubelet
umount_silent ${MOUNTER_PATH}
rm -rf ${MOUNTER_PATH}
if [ -n "${IMAGE_URL:-}" ]; then
docker rmi -f ${IMAGE_URL} &> /dev/null || /bin/true
fi
if [ -n "${MOUNTER_DEFAULT_NAME:-}" ]; then
docker rm -f ${MOUNTER_DEFAULT_NAME} &> /dev/null || /bin/true
fi
}
trap flex_clean ERR
# Generates a bash script that wraps all calls to the actual driver inside the
# chroot environment holding the mount utilities. Kubelet sees this script as the FlexVolume driver.
generate_chroot_wrapper() {
if [ ! -d ${MOUNTER_PATH}/flexvolume ]; then
echo "Failed to set up FlexVolume driver: cannot find directory '/flexvolume' in the mount utility image."
exit 1
fi
for driver_dir in ${MOUNTER_PATH}/flexvolume/*; do
if [ -d "$driver_dir" ]; then
filecount=$(ls -1 $driver_dir | wc -l)
if [ $filecount -gt 1 ]; then
echo "ERROR: Expected 1 file in the FlexVolume directory but found $filecount."
exit 1
fi
driver_file=$( ls $driver_dir | head -n 1 )
# driver_path points to the actual driver inside the mount utility image,
# relative to image root.
# wrapper_path is the wrapper script location, which is known to kubelet.
driver_path=flexvolume/$( basename $driver_dir )/${driver_file}
wrapper_dir=${VOLUME_PLUGIN_DIR}/$( basename $driver_dir )
wrapper_path=${wrapper_dir}/${driver_file}
mkdir -p $wrapper_dir
cat >$wrapper_path <<EOF
#!/bin/bash
chroot ${MOUNTER_PATH} ${driver_path} "\$@"
EOF
chmod 755 $wrapper_path
echo "FlexVolume driver installed at ${wrapper_path}"
fi
done
}
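# For a driver shipped at /flexvolume/k8s~nfs/nfs in the mounter image, the
# loop above generates (a sketch) the wrapper
# /etc/srv/kubernetes/kubelet-plugins/volume/exec/k8s~nfs/nfs containing:
#
#   #!/bin/bash
#   chroot /home/kubernetes/flexvolume_mounter flexvolume/k8s~nfs/nfs "$@"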
echo
echo "Importing mount utility image from Container Registry..."
echo
METADATA=http://metadata.google.internal/computeMetadata/v1
SVC_ACCT_ENDPOINT=$METADATA/instance/service-accounts/default
ACCESS_TOKEN=$(curl -s -H 'Metadata-Flavor: Google' $SVC_ACCT_ENDPOINT/token | cut -d'"' -f 4)
PROJECT_ID=$(curl -s -H 'Metadata-Flavor: Google' $METADATA/project/project-id)
IMAGE_URL=gcr.io/${PROJECT_ID}/${MOUNTER_IMAGE}
MOUNTER_DEFAULT_NAME=flexvolume_mounter
sudo -u ${SUDO_USER} docker login -u _token -p $ACCESS_TOKEN https://gcr.io > /dev/null
sudo -u ${SUDO_USER} docker run --name=${MOUNTER_DEFAULT_NAME} ${IMAGE_URL}
docker export ${MOUNTER_DEFAULT_NAME} > /tmp/${MOUNTER_DEFAULT_NAME}.tar
docker rm ${MOUNTER_DEFAULT_NAME} > /dev/null
docker rmi ${IMAGE_URL} > /dev/null
echo
echo "Loading mount utilities onto this instance..."
echo
mkdir -p ${MOUNTER_PATH}
tar xf /tmp/${MOUNTER_DEFAULT_NAME}.tar -C ${MOUNTER_PATH}
# Bind the kubelet directory to one under flexvolume_mounter
mkdir -p ${MOUNTER_PATH}/var/lib/kubelet
mount --rbind /var/lib/kubelet/ ${MOUNTER_PATH}/var/lib/kubelet
mount --make-rshared ${MOUNTER_PATH}/var/lib/kubelet
# Remount the flexvolume_mounter environment with /dev enabled.
mount --bind ${MOUNTER_PATH} ${MOUNTER_PATH}
mount -o remount,dev,exec ${MOUNTER_PATH}
echo
echo "Setting up FlexVolume driver..."
echo
mkdir -p ${VOLUME_PLUGIN_DIR}
mount --bind ${VOLUME_PLUGIN_DIR} ${VOLUME_PLUGIN_DIR}
mount -o remount,exec ${VOLUME_PLUGIN_DIR}
generate_chroot_wrapper
echo
echo "Restarting Kubelet..."
echo
systemctl restart kubelet.service
# Call inside the if condition so errexit does not abort the script on timeout.
if kubelet_wait; then
echo
echo "FlexVolume is ready."
else
echo "ERROR: Timed out after 1 minute waiting for kubelet restart."
fi
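# Example invocation (the image name is hypothetical; it is resolved to
# gcr.io/<project-id>/<imagename> via the metadata server):
#   sudo ./<this-script> my-nfs-mounter:v1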


@ -0,0 +1,83 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for master and node instance health monitoring. It is
# packaged in the kube-manifests tarball and executed through a systemd service
# in cluster/gce/gci/<master/node>.yaml. The env variables come from an env
# file provided by the systemd service.
set -o nounset
set -o pipefail
# We simply kill the process when there is a failure. Another systemd service will
# automatically restart the process.
function docker_monitoring {
while true; do
if ! timeout 60 docker ps > /dev/null; then
echo "Docker daemon failed!"
pkill docker
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 120
else
sleep "${SLEEP_SECONDS}"
fi
done
}
function kubelet_monitoring {
echo "Wait for 2 minutes for kubelet to be functional"
# TODO(andyzheng0831): replace it with a more reliable method if possible.
sleep 120
local -r max_seconds=10
local output=""
while true; do
if ! output=$(curl -m "${max_seconds}" -f -s -S http://127.0.0.1:10255/healthz 2>&1); then
# Print the response and/or errors.
echo "${output}"
echo "Kubelet is unhealthy!"
pkill kubelet
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 60
else
sleep "${SLEEP_SECONDS}"
fi
done
}
############## Main Function ################
if [[ "$#" -ne 1 ]]; then
echo "Usage: health-monitor.sh <docker/kubelet>"
exit 1
fi
KUBE_ENV="/home/kubernetes/kube-env"
if [[ ! -e "${KUBE_ENV}" ]]; then
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
exit 1
fi
SLEEP_SECONDS=10
component=$1
echo "Start kubernetes health monitoring for ${component}"
source "${KUBE_ENV}"
if [[ "${component}" == "docker" ]]; then
docker_monitoring
elif [[ "${component}" == "kubelet" ]]; then
kubelet_monitoring
else
echo "Health monitoring for component "${component}" is not supported!"
fi
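# Example: the systemd units in cluster/gce/gci/master.yaml and node.yaml run
#   /home/kubernetes/bin/health-monitor.sh docker
#   /home/kubernetes/bin/health-monitor.sh kubelet
# with Restart=always, so the monitor itself is restarted if it ever exits.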

32
vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh generated vendored Executable file

@ -0,0 +1,32 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the GCI distro
# Creates the GCI specific metadata files if they do not exist.
# Assumed vars:
# KUBE_TEMP
function ensure-gci-metadata-files {
if [[ ! -f "${KUBE_TEMP}/gci-update.txt" ]]; then
echo -n "update_disabled" > "${KUBE_TEMP}/gci-update.txt"
fi
if [[ ! -f "${KUBE_TEMP}/gci-ensure-gke-docker.txt" ]]; then
echo -n "true" > "${KUBE_TEMP}/gci-ensure-gke-docker.txt"
fi
if [[ ! -f "${KUBE_TEMP}/gci-docker-version.txt" ]]; then
echo -n "${GCI_DOCKER_VERSION:-}" > "${KUBE_TEMP}/gci-docker-version.txt"
fi
}

156
vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh generated vendored Executable file

@ -0,0 +1,156 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the GCI distro
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name of a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
function create-master-instance {
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
ensure-gci-metadata-files
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
function replicate-master-instance() {
local existing_master_zone="${1}"
local existing_master_name="${2}"
local existing_master_replicas="${3}"
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
# Substitute INITIAL_ETCD_CLUSTER_STATE
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER_STATE")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER_STATE: 'existing'")"
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-update-strategy > "${KUBE_TEMP}/gci-update.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-ensure-gke-docker > "${KUBE_TEMP}/gci-ensure-gke-docker.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-docker-version > "${KUBE_TEMP}/gci-docker-version.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" kube-master-certs > "${KUBE_TEMP}/kube-master-certs.yaml"
create-master-instance-internal "${REPLICA_NAME}"
}
function create-master-instance-internal() {
local gcloud="gcloud"
local retries=5
local sleep_sec=10
if [[ "${MASTER_SIZE##*-}" -ge 64 ]]; then # remove everything up to last dash (inclusive)
# Workaround for #55777
retries=30
sleep_sec=60
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
"${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
metadata="${metadata},gci-update-strategy=${KUBE_TEMP}/gci-update.txt"
metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt"
metadata="${metadata},gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
metadata="${metadata},kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml"
metadata="${metadata},${MASTER_EXTRA_METADATA}"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
for attempt in $(seq 1 ${retries}); do
if result=$(${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \
${preemptible_master} \
${network} 2>&1); then
echo "${result}" >&2
return 0
else
echo "${result}" >&2
if [[ ! "${result}" =~ "try again later" ]]; then
echo "Failed to create master instance due to non-retryable error" >&2
return 1
fi
sleep $sleep_sec
fi
done
echo "Failed to create master instance despite ${retries} attempts" >&2
return 1
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null
}

128
vendor/k8s.io/kubernetes/cluster/gce/gci/master.yaml generated vendored Normal file

@ -0,0 +1,128 @@
#cloud-config
write_files:
- path: /etc/systemd/system/kube-master-installation.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /home/kubernetes/bin
ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin
ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin
# Use --retry-connrefused opt only if it's supported by curl.
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh
ExecStart=/home/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-master-configuration.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Configure kubernetes master
After=kube-master-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh
ExecStart=/home/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-docker-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for docker
After=kube-master-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh docker
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubelet-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for kubelet
After=kube-master-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.timer
permissions: 0644
owner: root
content: |
[Unit]
Description=Hourly kube-logrotate invocation
[Timer]
OnCalendar=hourly
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes log rotation
After=kube-master-configuration.service
[Service]
Type=oneshot
ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubernetes.target
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
runcmd:
- systemctl daemon-reload
- systemctl enable kube-master-installation.service
- systemctl enable kube-master-configuration.service
- systemctl enable kube-docker-monitor.service
- systemctl enable kubelet-monitor.service
- systemctl enable kube-logrotate.timer
- systemctl enable kube-logrotate.service
- systemctl enable kubernetes.target
- systemctl start kubernetes.target


@ -0,0 +1 @@
mounter

32
vendor/k8s.io/kubernetes/cluster/gce/gci/mounter/BUILD generated vendored Normal file

@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "mounter",
importpath = "k8s.io/kubernetes/cluster/gce/gci/mounter",
library = ":go_default_library",
)
go_library(
name = "go_default_library",
srcs = ["mounter.go"],
importpath = "k8s.io/kubernetes/cluster/gce/gci/mounter",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -0,0 +1,7 @@
## v1 (Thu Oct 20 2016 Vishnu Kannan <vishh@google.com>)
- Creating a container with mount tools pre-installed
- Digest: sha256:9b3c1f04ad6b8947af4eb98f1eff2dc54c5664e3469b4cdf722ec5dd2a1dc064
## v2 (Fri Oct 28 2016 Vishnu Kannan <vishh@google.com>)
- Adding netbase package.
- Digest: sha256:c7dfe059fbbf976fc4284a87eb18adf0f8e0c4cf30a30f5a852842c772a64c2d


@ -0,0 +1,19 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:xenial
RUN apt-get update && apt-get install -y netbase nfs-common=1:1.2.8-9ubuntu12 glusterfs-client=3.7.6-1ubuntu1
ENTRYPOINT ["/bin/mount"]


@ -0,0 +1,30 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TAG=v2
REGISTRY=gcr.io/google_containers
IMAGE=gci-mounter
all: container
container:
docker build --pull -t ${REGISTRY}/${IMAGE}:${TAG} .
push:
gcloud docker -- push ${REGISTRY}/${IMAGE}:${TAG}
upload:
./stage-upload.sh ${TAG} ${REGISTRY}/${IMAGE}:${TAG}
.PHONY: all container push upload
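# Example release flow (assumes push access to gcr.io/google_containers):
#   make container   # build gci-mounter:v2 locally
#   make push        # push the image to the registry
#   make upload      # stage rkt and the ACI to GCS via stage-upload.sh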


@ -0,0 +1,93 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
const (
// Commands, paths, and error strings used when running mount inside the chroot
chrootCmd = "chroot"
mountCmd = "mount"
rootfs = "rootfs"
nfsRPCBindErrMsg = "mount.nfs: rpc.statd is not running but is required for remote locking.\nmount.nfs: Either use '-o nolock' to keep locks local, or start statd.\nmount.nfs: an incorrect mount option was specified\n"
rpcBindCmd = "/sbin/rpcbind"
defaultRootfs = "/home/kubernetes/containerized_mounter/rootfs"
)
func main() {
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "Command failed: must provide a command to run.\n")
os.Exit(1)
}
path, _ := filepath.Split(os.Args[0])
rootfsPath := filepath.Join(path, rootfs)
if _, err := os.Stat(rootfsPath); os.IsNotExist(err) {
rootfsPath = defaultRootfs
}
command := os.Args[1]
switch command {
case mountCmd:
mountErr := mountInChroot(rootfsPath, os.Args[2:])
if mountErr != nil {
fmt.Fprintf(os.Stderr, "Mount failed: %v", mountErr)
os.Exit(1)
}
default:
fmt.Fprintf(os.Stderr, "Unknown command, must be %s", mountCmd)
os.Exit(1)
}
}
// mountInChroot runs mount inside the chroot at the given rootfs path, passing the remaining arguments through.
func mountInChroot(rootfsPath string, args []string) error {
if _, err := os.Stat(rootfsPath); os.IsNotExist(err) {
return fmt.Errorf("path <%s> does not exist", rootfsPath)
}
args = append([]string{rootfsPath, mountCmd}, args...)
output, err := exec.Command(chrootCmd, args...).CombinedOutput()
if err == nil {
return nil
}
if !strings.EqualFold(string(output), nfsRPCBindErrMsg) {
// Mount failed but not because of RPC bind error
return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %v\nOutput: %s", err, chrootCmd, args, string(output))
}
// Mount failed because it is NFS v3 and we need to run rpcbind
output, err = exec.Command(chrootCmd, rootfsPath, rpcBindCmd, "-w").CombinedOutput()
if err != nil {
return fmt.Errorf("Mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
}
// Rpcbind is running, try mounting again
output, err = exec.Command(chrootCmd, args...).CombinedOutput()
if err != nil {
return fmt.Errorf("Mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
}
return nil
}


@ -0,0 +1,86 @@
#!/bin/sh
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Due to the GCE custom metadata size limit, we split the entire script into two
# files configure.sh and configure-helper.sh. The functionality of downloading
# kubernetes configuration, manifests, docker images, and binary files are
# put in configure.sh, which is uploaded via GCE custom metadata.
set -o errexit
set -o pipefail
set -o nounset
RKT_VERSION="v1.18.0"
DOCKER2ACI_VERSION="v0.13.0"
MOUNTER_VERSION=$1
DOCKER_IMAGE=docker://$2
MOUNTER_ACI_IMAGE=gci-mounter-${MOUNTER_VERSION}.aci
RKT_GCS_DIR=gs://kubernetes-release/rkt/
MOUNTER_GCS_DIR=gs://kubernetes-release/gci-mounter/
TMPDIR=/tmp
# Set up a working directory
DOWNLOAD_DIR=$(mktemp --tmpdir=${TMPDIR} -d gci-mounter-build.XXXXXXXXXX)
# Set up a staging directory
STAGING_DIR=$(mktemp --tmpdir=${TMPDIR} -d gci-mounter-staging.XXXXXXXXXX)
RKT_DIR=${STAGING_DIR}/${RKT_VERSION}
ACI_DIR=${STAGING_DIR}/gci-mounter
CWD=${PWD}
# Cleanup the temporary directories
function cleanup {
rm -rf ${DOWNLOAD_DIR}
rm -rf ${STAGING_DIR}
cd ${CWD}
}
# Delete temporary directories on exit
trap cleanup EXIT
mkdir ${RKT_DIR}
mkdir ${ACI_DIR}
# Download rkt
cd ${DOWNLOAD_DIR}
echo "Downloading rkt ${RKT_VERSION}"
wget "https://github.com/coreos/rkt/releases/download/${RKT_VERSION}/rkt-${RKT_VERSION}.tar.gz" &> /dev/null
echo "Extracting rkt ${RKT_VERSION}"
tar xzf rkt-${RKT_VERSION}.tar.gz
# Stage rkt into working directory
cp rkt-${RKT_VERSION}/rkt ${RKT_DIR}/rkt
cp rkt-${RKT_VERSION}/stage1-fly.aci ${RKT_DIR}/
# Convert docker image to aci and stage it
echo "Downloading docker2aci ${DOCKER2ACI_VERSION}"
wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" &> /dev/null
echo "Extracting docker2aci ${DOCKER2ACI_VERSION}"
tar xzf docker2aci-${DOCKER2ACI_VERSION}.tar.gz
ACI_IMAGE=$(${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci ${DOCKER_IMAGE} 2>/dev/null | tail -n 1)
cp ${ACI_IMAGE} ${ACI_DIR}/${MOUNTER_ACI_IMAGE}
# Upload the contents to gcs
echo "Uploading rkt artifacts in ${RKT_DIR} to ${RKT_GCS_DIR}"
gsutil cp -R ${RKT_DIR} ${RKT_GCS_DIR}
echo "Uploading gci mounter ACI in ${ACI_DIR} to ${MOUNTER_GCS_DIR}"
gsutil cp ${ACI_DIR}/${MOUNTER_ACI_IMAGE} ${MOUNTER_GCS_DIR}
echo "Upload completed"
echo "Update rkt, stag1-fly.aci & gci-mounter ACI versions and SHA1 in cluster/gce/gci/configure.sh"
echo "${RKT_VERSION}/rkt sha1: $(sha1sum ${RKT_DIR}/rkt)"
echo "${RKT_VERSION}/stage1-fly.aci sha1: $(sha1sum ${RKT_DIR}/stage1-fly.aci)"
echo "${MOUNTER_ACI_IMAGE} hash: $(sha1sum ${ACI_DIR}/${MOUNTER_ACI_IMAGE})"

38
vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh generated vendored Executable file

@ -0,0 +1,38 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the GCI distro
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
function get-node-instance-metadata {
local metadata=""
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml,"
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt,"
metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt,"
metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,"
metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt,"
metadata+="${NODE_EXTRA_METADATA}"
echo "${metadata}"
}
# $1: template name (required).
function create-node-instance-template {
local template_name="$1"
ensure-gci-metadata-files
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
}

128
vendor/k8s.io/kubernetes/cluster/gce/gci/node.yaml generated vendored Normal file

@ -0,0 +1,128 @@
#cloud-config
write_files:
- path: /etc/systemd/system/kube-node-installation.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /home/kubernetes/bin
ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin
ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin
# Use --retry-connrefused opt only if it's supported by curl.
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh
ExecStart=/home/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-node-configuration.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Configure kubernetes node
After=kube-node-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh
ExecStart=/home/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-docker-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for docker
After=kube-node-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh docker
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubelet-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for kubelet
After=kube-node-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.timer
permissions: 0644
owner: root
content: |
[Unit]
Description=Hourly kube-logrotate invocation
[Timer]
OnCalendar=hourly
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes log rotation
After=kube-node-configuration.service
[Service]
Type=oneshot
ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubernetes.target
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
runcmd:
- systemctl daemon-reload
- systemctl enable kube-node-installation.service
- systemctl enable kube-node-configuration.service
- systemctl enable kube-docker-monitor.service
- systemctl enable kubelet-monitor.service
- systemctl enable kube-logrotate.timer
- systemctl enable kube-logrotate.service
- systemctl enable kubernetes.target
- systemctl start kubernetes.target

94
vendor/k8s.io/kubernetes/cluster/gce/list-resources.sh generated vendored Executable file

@ -0,0 +1,94 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Calls gcloud to print out a variety of Google Cloud Platform resources used by
# Kubernetes. Can be run before/after test runs and compared to track leaking
# resources.
# PROJECT must be set in the environment.
# If ZONE, KUBE_GCE_INSTANCE_PREFIX, CLUSTER_NAME, KUBE_GCE_NETWORK, or
# KUBE_GKE_NETWORK is set, they will be used to filter the results.
set -o errexit
set -o nounset
set -o pipefail
ZONE=${ZONE:-}
REGION=${ZONE%-*}
INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-${CLUSTER_NAME:-}}
NETWORK=${KUBE_GCE_NETWORK:-${KUBE_GKE_NETWORK:-}}
# In GKE the instance prefix starts with "gke-".
if [[ "${KUBERNETES_PROVIDER:-}" == "gke" ]]; then
INSTANCE_PREFIX="gke-${CLUSTER_NAME}"
# Truncate to 26 characters for route prefix matching.
INSTANCE_PREFIX="${INSTANCE_PREFIX:0:26}"
fi
# Usage: gcloud-compute-list <resource> <additional parameters to gcloud...>
# GREP_REGEX is applied to the output of gcloud if set
GREP_REGEX=""
function gcloud-compute-list() {
local -r resource=$1
local -r filter=${2:-}
echo -e "\n\n[ ${resource} ]"
local attempt=1
local result=""
while true; do
if result=$(gcloud compute ${resource} list --project=${PROJECT} ${filter:+--filter="$filter"} ${@:3}); then
if [[ ! -z "${GREP_REGEX}" ]]; then
result=$(echo "${result}" | grep "${GREP_REGEX}" || true)
fi
echo "${result}"
return
fi
echo -e "Attempt ${attempt} failed to list ${resource}. Retrying." >&2
attempt=$(($attempt+1))
if [[ ${attempt} -gt 5 ]]; then
echo -e "List ${resource} failed!" >&2
exit 2
fi
sleep $((5*${attempt}))
done
}
echo "Project: ${PROJECT}"
echo "Region: ${REGION}"
echo "Zone: ${ZONE}"
echo "Instance prefix: ${INSTANCE_PREFIX:-}"
echo "Network: ${NETWORK}"
echo "Provider: ${KUBERNETES_PROVIDER:-}"
# List resources related to instances, filtering by the instance prefix if
# provided.
gcloud-compute-list instance-templates "name ~ '${INSTANCE_PREFIX}.*'"
gcloud-compute-list instance-groups "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-compute-list instances "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
# List disk resources, filtering by instance prefix if provided.
gcloud-compute-list disks "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
# List network resources. We include names starting with "a", corresponding to
# those that Kubernetes creates.
gcloud-compute-list addresses "${REGION:+"region=(${REGION}) AND "}name ~ 'a.*|${INSTANCE_PREFIX}.*'"
# Match either the header or a line with the specified e2e network.
# This assumes that the network name is the second field in the output.
GREP_REGEX="^NAME\|^[^ ]\+[ ]\+\(default\|${NETWORK}\) "
gcloud-compute-list routes "name ~ 'default.*|${INSTANCE_PREFIX}.*'"
gcloud-compute-list firewall-rules "name ~ 'default.*|k8s-fw.*|${INSTANCE_PREFIX}.*'"
GREP_REGEX=""
gcloud-compute-list forwarding-rules ${REGION:+"region=(${REGION})"}
gcloud-compute-list target-pools ${REGION:+"region=(${REGION})"}
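# Example workflow for spotting leaked resources around an e2e run
# (project/zone values are illustrative):
#   PROJECT=my-project ZONE=us-central1-b CLUSTER_NAME=e2e ./list-resources.sh > before.txt
#   # ... run tests ...
#   PROJECT=my-project ZONE=us-central1-b CLUSTER_NAME=e2e ./list-resources.sh > after.txt
#   diff before.txt after.txt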

1
vendor/k8s.io/kubernetes/cluster/gce/ubuntu generated vendored Symbolic link

@ -0,0 +1 @@
gci

602
vendor/k8s.io/kubernetes/cluster/gce/upgrade.sh generated vendored Executable file

@ -0,0 +1,602 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !!! EXPERIMENTAL !!! Upgrade script for GCE. Expect this to get
# rewritten in Go in relatively short order, but it allows us to start
# testing the concepts.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
echo "!!! ${1} only works on GCE" >&2
exit 1
fi
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
function usage() {
echo "!!! EXPERIMENTAL !!!"
echo ""
echo "${0} [-M | -N | -P] [-o] (-l | <version number or publication>)"
echo " Upgrades master and nodes by default"
echo " -M: Upgrade master only"
echo " -N: Upgrade nodes only"
echo " -P: Node upgrade prerequisites only (create a new instance template)"
echo " -c: Upgrade NODE_UPGRADE_PARALLELISM nodes in parallel (default=1) within a single instance group. The MIGs themselves are dealt serially."
echo " -o: Use os distro sepcified in KUBE_NODE_OS_DISTRIBUTION for new nodes. Options include 'debian' or 'gci'"
echo " -l: Use local(dev) binaries. This is only supported for master upgrades."
echo ""
echo ' Version number or publication is either a proper version number'
echo ' (e.g. "v1.0.6", "v1.2.0-alpha.1.881+376438b69c7612") or a version'
echo ' publication of the form <bucket>/<version> (e.g. "release/stable",'
echo ' "ci/latest-1"). Some common ones are:'
echo ' - "release/stable"'
echo ' - "release/latest"'
echo ' - "ci/latest"'
echo ' See the docs on getting builds for more information about version publication.'
echo ""
echo "(... Fetching current release versions ...)"
echo ""
# NOTE: IF YOU CHANGE THE FOLLOWING LIST, ALSO UPDATE test/e2e/cluster_upgrade.go
local release_stable
local release_latest
local ci_latest
release_stable=$(gsutil cat gs://kubernetes-release/release/stable.txt)
release_latest=$(gsutil cat gs://kubernetes-release/release/latest.txt)
ci_latest=$(gsutil cat gs://kubernetes-release-dev/ci/latest.txt)
echo "Right now, versions are as follows:"
echo " release/stable: ${0} ${release_stable}"
echo " release/latest: ${0} ${release_latest}"
echo " ci/latest: ${0} ${ci_latest}"
}
function print-node-version-info() {
echo "== $1 Node OS and Kubelet Versions =="
"${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o=jsonpath='{range .items[*]}name: "{.metadata.name}", osImage: "{.status.nodeInfo.osImage}", kubeletVersion: "{.status.nodeInfo.kubeletVersion}"{"\n"}{end}'
}
function upgrade-master() {
local num_masters
num_masters=$(get-master-replicas-count)
if [[ "${num_masters}" -gt 1 ]]; then
echo "Upgrade of master not supported if more than one master replica present. The current number of master replicas: ${num_masters}"
exit 1
fi
echo "== Upgrading master to '${SERVER_BINARY_TAR_URL}'. Do not interrupt, deleting master instance. =="
# Tries to figure out KUBE_USER/KUBE_PASSWORD by first looking under
# kubeconfig:username, and then under kubeconfig:username-basic-auth.
# TODO: KUBE_USER is used in generating ABAC policy which the
# apiserver may not have enabled. If it's enabled, we must have a user
# to generate a valid ABAC policy. If the username changes, should
# the script fail? Should we generate a default username and password
# if the section is missing in kubeconfig? Handle this better in 1.5.
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
detect-master
parse-master-env
upgrade-master-env
# Delete the master instance. Note that the master-pd is created
# with auto-delete=no, so it should not be deleted.
gcloud compute instances delete \
--project "${PROJECT}" \
--quiet \
--zone "${ZONE}" \
"${MASTER_NAME}"
create-master-instance "${MASTER_NAME}-ip"
wait-for-master
}
function upgrade-master-env() {
echo "== Upgrading master environment variables. =="
# Generate the node problem detector token if it isn't present on the original
# master.
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" && "${NODE_PROBLEM_DETECTOR_TOKEN:-}" == "" ]]; then
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
fi
}
function wait-for-master() {
echo "== Waiting for new master to respond to API requests =="
local curl_auth_arg
if [[ -n ${KUBE_BEARER_TOKEN:-} ]]; then
curl_auth_arg=(-H "Authorization: Bearer ${KUBE_BEARER_TOKEN}")
elif [[ -n ${KUBE_PASSWORD:-} ]]; then
curl_auth_arg=(--user "${KUBE_USER}:${KUBE_PASSWORD}")
else
echo "can't get auth credentials for the current master"
exit 1
fi
until curl --insecure "${curl_auth_arg[@]}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
printf "."
sleep 2
done
echo "== Done =="
}
# Perform common upgrade setup tasks
#
# Assumed vars
# KUBE_VERSION
function prepare-upgrade() {
kube::util::ensure-temp-dir
detect-project
detect-subnetworks
detect-node-names # sets INSTANCE_GROUPS
write-cluster-name
tars_from_version
}
# Reads kube-env metadata from the first node in NODE_NAMES.
#
# Assumed vars:
# NODE_NAMES
# PROJECT
# ZONE
function get-node-env() {
# TODO(zmerlynn): Make this more reliable with retries.
gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \
"curl --fail --silent -H 'Metadata-Flavor: Google' \
'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
}
# Reads os distro information from /etc/os-release on the node.
# $1: The name of the node
#
# Assumed vars:
# PROJECT
# ZONE
function get-node-os() {
gcloud compute ssh "$1" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--command \
"cat /etc/os-release | grep \"^ID=.*\" | cut -c 4-"
}
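# Example: on a COS node /etc/os-release contains "ID=cos", so this prints
# "cos"; setup-base-image below maps that back to the "gci" distro directory.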
# Assumed vars:
# KUBE_VERSION
# NODE_SCOPES
# NODE_INSTANCE_PREFIX
# PROJECT
# ZONE
#
# Vars set:
# KUBELET_TOKEN
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
# EXTRA_DOCKER_OPTS
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
function upgrade-nodes() {
prepare-node-upgrade
do-node-upgrade
}
function setup-base-image() {
if [[ "${env_os_distro}" == "false" ]]; then
echo "== Ensuring that new Node base OS image matched the existing Node base OS image"
NODE_OS_DISTRIBUTION=$(get-node-os "${NODE_NAMES[0]}")
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
# Reset the node image based on current os distro
set-node-image
fi
}
# prepare-node-upgrade creates a new instance template suitable for upgrading
# to KUBE_VERSION and echoes a single line with the name of the new template.
#
# Assumed vars:
# KUBE_VERSION
# NODE_SCOPES
# NODE_INSTANCE_PREFIX
# PROJECT
# ZONE
#
# Vars set:
# SANITIZED_VERSION
# INSTANCE_GROUPS
# KUBELET_TOKEN
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
# EXTRA_DOCKER_OPTS
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
function prepare-node-upgrade() {
echo "== Preparing node upgrade (to ${KUBE_VERSION}). ==" >&2
setup-base-image
SANITIZED_VERSION=$(echo ${KUBE_VERSION} | sed 's/[\.\+]/-/g')
# TODO(zmerlynn): Refactor setting scope flags.
local scope_flags=
if [ -n "${NODE_SCOPES}" ]; then
scope_flags="--scopes ${NODE_SCOPES}"
else
scope_flags="--no-scopes"
fi
# Get required node env vars from the existing template.
local node_env=$(get-node-env)
KUBELET_TOKEN=$(get-env-val "${node_env}" "KUBELET_TOKEN")
KUBE_PROXY_TOKEN=$(get-env-val "${node_env}" "KUBE_PROXY_TOKEN")
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${node_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
CA_CERT_BASE64=$(get-env-val "${node_env}" "CA_CERT")
EXTRA_DOCKER_OPTS=$(get-env-val "${node_env}" "EXTRA_DOCKER_OPTS")
KUBELET_CERT_BASE64=$(get-env-val "${node_env}" "KUBELET_CERT")
KUBELET_KEY_BASE64=$(get-env-val "${node_env}" "KUBELET_KEY")
upgrade-node-env
# TODO(zmerlynn): How do we ensure kube-env is written in a ${version}-
# compatible way?
write-node-env
# TODO(zmerlynn): Get configure-vm script from ${version}. (Must plumb this
# through all create-node-instance-template implementations).
local template_name=$(get-template-name-from-version ${SANITIZED_VERSION})
create-node-instance-template "${template_name}"
# The following is echo'd so that callers can get the template name.
echo "Instance template name: ${template_name}"
echo "== Finished preparing node upgrade (to ${KUBE_VERSION}). ==" >&2
}
function upgrade-node-env() {
echo "== Upgrading node environment variables. =="
# Get the node problem detector token from master if it isn't present on
# the original node.
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" && "${NODE_PROBLEM_DETECTOR_TOKEN:-}" == "" ]]; then
detect-master
local master_env=$(get-master-env)
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
fi
}
# Upgrades a single node.
# $1: The name of the node
#
# Note: This is called multiple times from do-node-upgrade() in parallel, so should be thread-safe.
function do-single-node-upgrade() {
local -r instance="$1"
instance_id=$(gcloud compute instances describe "${instance}" \
--format='get(id)' \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1) && describe_rc=$? || describe_rc=$?
if [[ "${describe_rc}" != 0 ]]; then
echo "== FAILED to describe ${instance} =="
echo "${instance_id}"
return ${describe_rc}
fi
# Drain node
echo "== Draining ${instance}. == " >&2
"${KUBE_ROOT}/cluster/kubectl.sh" drain --delete-local-data --force --ignore-daemonsets "${instance}" \
&& drain_rc=$? || drain_rc=$?
if [[ "${drain_rc}" != 0 ]]; then
echo "== FAILED to drain ${instance} =="
return ${drain_rc}
fi
# Recreate instance
echo "== Recreating instance ${instance}. ==" >&2
recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
--project="${PROJECT}" \
--zone="${ZONE}" \
--instances="${instance}" 2>&1) && recreate_rc=$? || recreate_rc=$?
if [[ "${recreate_rc}" != 0 ]]; then
echo "== FAILED to recreate ${instance} =="
echo "${recreate}"
return ${recreate_rc}
fi
# Wait for instance to be recreated
echo "== Waiting for instance ${instance} to be recreated. ==" >&2
while true; do
new_instance_id=$(gcloud compute instances describe "${instance}" \
--format='get(id)' \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1) && describe_rc=$? || describe_rc=$?
if [[ "${describe_rc}" != 0 ]]; then
echo "== FAILED to describe ${instance} =="
echo "${new_instance_id}"
echo " (Will retry.)"
elif [[ "${new_instance_id}" == "${instance_id}" ]]; then
echo -n .
else
echo "Instance ${instance} recreated."
break
fi
sleep 1
done
# Wait for k8s node object to reflect new instance id
echo "== Waiting for new node to be added to k8s. ==" >&2
while true; do
external_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.spec.externalID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
if [[ "${kubectl_rc}" != 0 ]]; then
echo "== FAILED to get node ${instance} =="
echo "${external_id}"
echo " (Will retry.)"
elif [[ "${external_id}" == "${new_instance_id}" ]]; then
echo "Node ${instance} recreated."
break
elif [[ "${external_id}" == "${instance_id}" ]]; then
echo -n .
else
echo "Unexpected external_id '${external_id}' matches neither old ('${instance_id}') nor new ('${new_instance_id}')."
echo " (Will retry.)"
fi
sleep 1
done
# Wait for the node to not have SchedulingDisabled=True and also to have
# Ready=True.
echo "== Waiting for ${instance} to become ready. ==" >&2
while true; do
cordoned=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "SchedulingDisabled")].status}')
ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
if [[ "${cordoned}" == 'True' ]]; then
echo "Node ${instance} is still not ready: SchedulingDisabled=${ready}"
elif [[ "${ready}" != 'True' ]]; then
echo "Node ${instance} is still not ready: Ready=${ready}"
else
echo "Node ${instance} Ready=${ready}"
break
fi
sleep 1
done
}
# Prereqs:
# - prepare-node-upgrade should have been called successfully
function do-node-upgrade() {
echo "== Upgrading nodes to ${KUBE_VERSION} with max parallelism of ${node_upgrade_parallelism}. ==" >&2
# Do the actual upgrade.
# NOTE(zmerlynn): If you are changing this gcloud command, update
# test/e2e/cluster_upgrade.go to match this EXACTLY.
local template_name=$(get-template-name-from-version ${SANITIZED_VERSION})
local old_templates=()
local updates=()
for group in ${INSTANCE_GROUPS[@]}; do
old_templates+=($(gcloud compute instance-groups managed list \
--project="${PROJECT}" \
--filter="name ~ '${group}' AND zone:(${ZONE})" \
--format='value(instanceTemplate)' || true))
set_instance_template_out=$(gcloud compute instance-groups managed set-instance-template "${group}" \
--template="${template_name}" \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1) && set_instance_template_rc=$? || set_instance_template_rc=$?
if [[ "${set_instance_template_rc}" != 0 ]]; then
echo "== FAILED to set-instance-template for ${group} to ${template_name} =="
echo "${set_instance_template_out}"
return ${set_instance_template_rc}
fi
instances=()
instances+=($(gcloud compute instance-groups managed list-instances "${group}" \
--format='value(instance)' \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1)) && list_instances_rc=$? || list_instances_rc=$?
if [[ "${list_instances_rc}" != 0 ]]; then
echo "== FAILED to list instances in group ${group} =="
echo "${instances}"
return ${list_instances_rc}
fi
process_count_left=${node_upgrade_parallelism}
pids=()
ret_code_sum=0 # Should stay 0 in the loop iff all parallel node upgrades succeed.
for instance in ${instances[@]}; do
do-single-node-upgrade "${instance}" & pids+=("$!")
# We don't want to run more than ${node_upgrade_parallelism} upgrades at a time,
# so wait once we hit that many nodes. This isn't ideal, since one might take much
# longer than the others, but it should help.
process_count_left=$((process_count_left - 1))
if [[ process_count_left -eq 0 || "${instance}" == "${instances[-1]}" ]]; then
# Wait for each of the parallel node upgrades to finish.
for pid in "${pids[@]}"; do
wait $pid
ret_code_sum=$(( ret_code_sum + $? ))
done
# Return if any of the node upgrades failed.
if [[ ${ret_code_sum} != 0 ]]; then
echo "== Some of the ${node_upgrade_parallelism} parallel node upgrades failed. =="
return ${ret_code_sum}
fi
process_count_left=${node_upgrade_parallelism}
fi
done
done
# Remove the old templates.
echo "== Deleting old templates in ${PROJECT}. ==" >&2
for tmpl in ${old_templates[@]}; do
gcloud compute instance-templates delete \
--quiet \
--project="${PROJECT}" \
"${tmpl}" || true
done
echo "== Finished upgrading nodes to ${KUBE_VERSION}. ==" >&2
}
master_upgrade=true
node_upgrade=true
node_prereqs=false
local_binaries=false
env_os_distro=false
node_upgrade_parallelism=1
while getopts ":MNPlcho" opt; do
case ${opt} in
M)
node_upgrade=false
;;
N)
master_upgrade=false
;;
P)
node_prereqs=true
;;
l)
local_binaries=true
;;
c)
node_upgrade_parallelism=${NODE_UPGRADE_PARALLELISM:-1}
;;
o)
env_os_distro=true
;;
h)
usage
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
exit 1
;;
esac
done
shift $((OPTIND-1))
if [[ $# -gt 1 ]]; then
echo "Error: Only one parameter (<version number or publication>) may be passed after the set of flags!" >&2
usage
exit 1
fi
if [[ $# -lt 1 ]] && [[ "${local_binaries}" == "false" ]]; then
usage
exit 1
fi
if [[ "${master_upgrade}" == "false" ]] && [[ "${node_upgrade}" == "false" ]]; then
echo "Can't specify both -M and -N" >&2
exit 1
fi
# Prompt if the etcd storage media type isn't set (unless using etcd2) when doing a master upgrade.
if [[ -z "${STORAGE_MEDIA_TYPE:-}" ]] && [[ "${STORAGE_BACKEND:-}" != "etcd2" ]] && [[ "${master_upgrade}" == "true" ]]; then
echo "The default etcd storage media type in 1.6 has changed from application/json to application/vnd.kubernetes.protobuf."
echo "Documentation about the change can be found at https://kubernetes.io/docs/admin/etcd_upgrade."
echo ""
echo "ETCD2 DOES NOT SUPPORT PROTOBUF: If you wish to have to ability to downgrade to etcd2 later application/json must be used."
echo ""
echo "It's HIGHLY recommended that etcd be backed up before this step!!"
echo ""
echo "To enable using json, before running this script set:"
echo "export STORAGE_MEDIA_TYPE=application/json"
echo ""
if [ -t 0 ] && [ -t 1 ]; then
read -p "Would you like to continue with the new default, and lose the ability to downgrade to etcd2? [y/N] " confirm
if [[ "${confirm}" != "y" ]]; then
exit 1
fi
else
echo "To enable using protobuf, before running this script set:"
echo "export STORAGE_MEDIA_TYPE=application/vnd.kubernetes.protobuf"
echo ""
echo "STORAGE_MEDIA_TYPE must be specified when run non-interactively." >&2
exit 1
fi
fi
# Prompt if etcd image/version is unspecified when doing master upgrade.
# In e2e tests, we use TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true to skip this
# prompt, simulating the behavior when the user confirms interactively.
# All other automated use of this script should explicitly specify a version.
if [[ "${master_upgrade}" == "true" ]]; then
if [[ -z "${ETCD_IMAGE:-}" && -z "${TEST_ETCD_IMAGE:-}" ]] || [[ -z "${ETCD_VERSION:-}" && -z "${TEST_ETCD_VERSION:-}" ]]; then
echo
echo "***WARNING***"
echo "Upgrading Kubernetes with this script might result in an upgrade to a new etcd version."
echo "Some etcd version upgrades, such as 3.0.x to 3.1.x, DO NOT offer a downgrade path."
echo "To pin the etcd version to your current one (e.g. v3.0.17), set the following variables"
echo "before running this script:"
echo
echo "# example: pin to etcd v3.0.17"
echo "export ETCD_IMAGE=3.0.17"
echo "export ETCD_VERSION=3.0.17"
echo
echo "Alternatively, if you choose to allow an etcd upgrade that doesn't support downgrade,"
echo "you might still be able to downgrade Kubernetes by pinning to the newer etcd version."
echo "In all cases, it is strongly recommended to have an etcd backup before upgrading."
echo
if [ -t 0 ] && [ -t 1 ]; then
read -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm
if [[ "${confirm}" != "y" ]]; then
exit 1
fi
elif [[ "${TEST_ALLOW_IMPLICIT_ETCD_UPGRADE:-}" != "true" ]]; then
echo "ETCD_IMAGE and ETCD_VERSION must be specified when run non-interactively." >&2
exit 1
fi
fi
fi
print-node-version-info "Pre-Upgrade"
if [[ "${local_binaries}" == "false" ]]; then
set_binary_version ${1}
fi
prepare-upgrade
if [[ "${node_prereqs}" == "true" ]]; then
prepare-node-upgrade
exit 0
fi
if [[ "${master_upgrade}" == "true" ]]; then
upgrade-master
fi
if [[ "${node_upgrade}" == "true" ]]; then
if [[ "${local_binaries}" == "true" ]]; then
echo "Upgrading nodes to local binaries is not yet supported." >&2
exit 1
else
upgrade-nodes
fi
fi
echo "== Validating cluster post-upgrade =="
"${KUBE_ROOT}/cluster/validate-cluster.sh"
print-node-version-info "Post-Upgrade"

2244
vendor/k8s.io/kubernetes/cluster/gce/util.sh generated vendored Executable file

File diff suppressed because it is too large