vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@@ -1,26 +1,17 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
pkg_tar(
name = "gci-trusty-manifests",
files = [
"container-linux/configure-helper.sh",
"gci/configure-helper.sh",
"gci/health-monitor.sh",
"//cluster/gce/gci/mounter",
],
files = {
"//cluster/gce/gci/mounter": "gci-mounter",
"gci/configure-helper.sh": "gci-configure-helper.sh",
"gci/health-monitor.sh": "health-monitor.sh",
},
mode = "0755",
strip_prefix = ".",
# pkg_tar doesn't support renaming the files we add, so instead create symlinks.
symlinks = {
"container-linux-configure-helper.sh": "container-linux/configure-helper.sh",
"gci-configure-helper.sh": "gci/configure-helper.sh",
"health-monitor.sh": "gci/health-monitor.sh",
"gci-mounter": "gci/mounter/mounter",
"trusty-configure-helper.sh": "trusty/configure-helper.sh",
},
)
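# Illustration only (not part of this diff): the new files = {src: rename}
# mapping from the @io_kubernetes_build pkg_tar wrapper renames entries as they
# are packed, so listing the archive (the output path below is an assumption)
# would give roughly
#   tar tzf bazel-bin/cluster/gce/gci-trusty-manifests.tar
#   gci-mounter
#   gci-configure-helper.sh
#   health-monitor.sh
# whereas the old rule packed the files under their original names and added
# symlinks to provide the renamed paths.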
filegroup(
@@ -40,15 +31,32 @@ filegroup(
tags = ["automanaged"],
)
# Having the configure-vm.sh script and trusty code from the GCE cluster
# deploy hosted with the release is useful for GKE.
# This list should match the list in kubernetes/release/lib/releaselib.sh.
# Having the COS code from the GCE cluster deploy hosted with the release is
# useful for GKE. This list should match the list in
# kubernetes/release/lib/releaselib.sh.
release_filegroup(
name = "gcs-release-artifacts",
srcs = [
"configure-vm.sh",
"gci/configure.sh",
"gci/master.yaml",
"gci/node.yaml",
],
)
pkg_tar(
name = "gce-master-manifests",
srcs = [
"manifests/abac-authz-policy.jsonl",
"manifests/cluster-autoscaler.manifest",
"manifests/e2e-image-puller.manifest",
"manifests/etcd.manifest",
"manifests/glbc.manifest",
"manifests/kube-addon-manager.yaml",
"manifests/kube-apiserver.manifest",
"manifests/kube-controller-manager.manifest",
"manifests/kube-proxy.manifest",
"manifests/kube-scheduler.manifest",
"manifests/rescheduler.manifest",
],
mode = "0644",
)

View File

@@ -3,8 +3,12 @@ reviewers:
- gmarek
- jszczepkowski
- vishh
- mwielgus
- MaciekPytel
approvers:
- bowei
- gmarek
- jszczepkowski
- vishh
- mwielgus
- MaciekPytel

View File

@@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
filegroup(
name = "addon-srcs",
@@ -16,10 +16,10 @@ filegroup(
pkg_tar(
name = "addons",
extension = "tar.gz",
files = [
srcs = [
":addon-srcs",
],
extension = "tar.gz",
mode = "0644",
strip_prefix = ".",
)

View File

@@ -0,0 +1,10 @@
apiVersion: "v1"
kind: "LimitRange"
metadata:
name: "limits"
namespace: default
spec:
limits:
- type: "Container"
defaultRequest:
cpu: "100m"

View File

@@ -0,0 +1,30 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
namespace: kube-system

View File

@@ -0,0 +1,35 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- patch
- update
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update

View File

@@ -3,7 +3,7 @@ kind: PodSecurityPolicy
metadata:
name: gce.unprivileged-addon
annotations:
kubernetes.io/description: 'This policy grants the minimum ammount of
kubernetes.io/description: 'This policy grants the minimum amount of
privilege necessary to run non-privileged kube-system pods. This policy is
not intended for use outside of kube-system, and may include further
restrictions in the future.'

View File

@@ -98,4 +98,6 @@ function get-cluster-ip-range {
echo "${suggested_range}"
}
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"

View File

@@ -39,7 +39,7 @@ NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs"; you can specify multiple
# configurations by seperating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
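# Illustrative sketch (not part of this commit; the helper name is
# hypothetical): parsing the "#,scsi/nvme,block/fs" spec described above.
function parse-node-local-ssds-ext() {
local config count interface format
local -a configs
IFS=';' read -r -a configs <<< "$1"
for config in "${configs[@]}"; do
IFS=',' read -r count interface format <<< "${config}"
echo "want ${count} local SSD(s) over ${interface}, exposed as ${format}"
done
}
# parse-node-local-ssds-ext "2,scsi,fs;1,nvme,block"
# -> want 2 local SSD(s) over scsi, exposed as fs
# -> want 1 local SSD(s) over nvme, exposed as block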
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
@@ -54,12 +54,6 @@ CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
@@ -80,7 +74,7 @@ fi
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
@@ -88,13 +82,16 @@ NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instances separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# KUBELET_TEST_ARGS are extra arguments passed to kubelet.
KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using 'default' network.
@@ -121,11 +118,16 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
REMOUNT_VOLUME_PLUGIN_DIR="${REMOUNT_VOLUME_PLUGIN_DIR:-true}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
@@ -159,7 +161,7 @@ ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.13-5-watch}"
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.16-1}"
# If specified, one special node out of NUM_NODES is created with this machine type.
# Useful for scheduling heapster in large clusters with nodes of small size.
@@ -190,7 +192,7 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
@@ -229,12 +231,6 @@ DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_PD:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
@@ -271,14 +267,19 @@ ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. Currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
fi
# Enable GCE Alpha features.
@@ -297,12 +298,17 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,PVCProtection
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook"
# ResourceQuota must come last, or a creation can be recorded even though the pod was ultimately forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
@@ -313,10 +319,7 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
@@ -340,10 +343,6 @@ ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
@@ -363,9 +362,10 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
@@ -374,7 +374,7 @@ HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
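# Assumed sizing model (not stated in this file): an addon resizer is presumed
# to scale heapster's requests linearly with cluster size, roughly
#   memory = HEAPSTER_GCP_BASE_MEMORY + NUM_NODES * HEAPSTER_GCP_MEMORY_PER_NODE
#   cpu    = HEAPSTER_GCP_BASE_CPU    + NUM_NODES * HEAPSTER_GCP_CPU_PER_NODE
# e.g. hypothetically, 100 nodes with a 4Mi per-node memory increment would
# give 140Mi + 400Mi = 540Mi of memory and 80m + 100 * 0.5m = 130m of CPU.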
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
@@ -400,3 +400,9 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi
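# Illustration only, with hypothetical values: ENABLE_TOKENREQUEST=true and
# CLUSTER_NAME=my-cluster would yield
#   FEATURE_GATES=<existing gates>,TokenRequest=true
#   SERVICEACCOUNT_ISSUER=https://kubernetes.io/my-cluster
#   SERVICEACCOUNT_API_AUDIENCES=https://kubernetes.default.svc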

View File

@@ -37,6 +37,11 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs"; you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
@@ -48,13 +53,6 @@ CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
@@ -74,7 +72,7 @@ fi
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
@@ -82,7 +80,8 @@ NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
@@ -147,11 +146,21 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# TODO(piosz) remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to setup as part of the cluster bring up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# The metadata agent is a DaemonSet that provides metadata about the Kubernetes
# objects running on the same node, for use when exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.16-1}"
# If specified, one special node out of NUM_NODES is created with this machine type.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.14) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@@ -166,15 +175,8 @@ CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CL
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
# TODO: change this and flex e2e test when default flex volume install path is changed for GCI
# Set flex dir to one that's readable from controller-manager container and writable by the flex e2e test.
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR="--flex-volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
# Set flex dir to one that's readable from kubelet and writable by the flex e2e test.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || ([[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] && [[ "${REGISTER_MASTER_KUBELET}" == "false" ]]); then
KUBELET_TEST_VOLUME_PLUGIN_DIR="--volume-plugin-dir=/etc/srv/kubernetes/kubelet-plugins/volume/exec"
fi
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
REMOUNT_VOLUME_PLUGIN_DIR="${REMOUNT_VOLUME_PLUGIN_DIR:-true}"
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
@@ -183,7 +185,7 @@ TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBELET_TEST_VOLUME_PLUGIN_DIR:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
@@ -191,7 +193,7 @@ if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}"
MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${CONTROLLER_MANAGER_TEST_VOLUME_PLUGIN_DIR:-}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
@@ -218,7 +220,7 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
@@ -251,12 +253,6 @@ DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
@@ -293,14 +289,19 @@ ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. gcloud currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
fi
# Enable GCE Alpha features.
@@ -319,7 +320,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority"
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
@@ -343,15 +344,13 @@ STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
@@ -376,10 +375,6 @@ ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Upgrade test jobs that go from a version < 1.6 to a version >= 1.6 should override this to be true.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-true}" # true, false
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
@@ -397,9 +392,10 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
@@ -408,7 +404,7 @@ HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
@@ -419,6 +415,9 @@ ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-true}"
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: Change the kube-proxy implementation. Choices are [iptables, ipvs].
KUBE_PROXY_MODE="${KUBE_PROXY_MODE:-iptables}"
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
@@ -434,3 +433,9 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi

View File

@@ -1,899 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# If we have any arguments at all, this is a push and not just setup.
is_push=$@
function ensure-basic-networking() {
# Deal with GCE networking bring-up race. (We rely on DNS for a lot,
# and it's just not worth doing a whole lot of startup work if this
# isn't ready yet.)
until getent hosts metadata.google.internal &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...'
sleep 3
done
until getent hosts $(hostname -f || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own FQDN)...'
sleep 3
done
until getent hosts $(hostname -i || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own IP)...'
sleep 3
done
echo "Networking functional on $(hostname) ($(hostname -i))"
}
# A hookpoint for installing any needed packages
ensure-packages() {
:
}
function create-node-pki {
echo "Creating node pki files"
local -r pki_dir="/etc/kubernetes/pki"
mkdir -p "${pki_dir}"
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
CA_CERT_BUNDLE="${CA_CERT}"
fi
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}"
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}"
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}"
fi
}
# A hookpoint for setting up local devices
ensure-local-disks() {
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "$ssd" ]; then
ssdnum=`echo $ssd | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum"
mkdir -p /mnt/disks/ssd$ssdnum
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \
{ echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; }
else
echo "No local SSD disks found."
fi
done
}
function config-ip-firewall {
echo "Configuring IP firewall rules"
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
echo "Add rule for metadata concealment"
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
fi
}
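# Illustration only (hypothetical check, not part of this script): the rule
# above can be inspected with
#   iptables -w -t nat -L PREROUTING -n | grep metadata-concealment
# Bridged pod traffic to 169.254.169.254:80 is DNATed to the local metadata
# proxy on 127.0.0.1:988 instead of reaching the real metadata server.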
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
function salt-apiserver-timeout-grain() {
cat <<EOF >>/etc/salt/minion.d/grains.conf
minRequestTimeout: '$1'
EOF
}
function set-broken-motd() {
echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}
function reset-motd() {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
gitref="${version//*+/}"
fi
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/usr/local/share/doc/kubernetes/LICENSES
EOF
}
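# Worked examples for the gitref extraction above (inputs are hypothetical):
#   echo "v1.9.3" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g"
#   -> v1.9.3           (release tag: gitref == version, no devel note)
#   echo "v1.10.0-alpha.1.123+abcdef0123456789" | <same sed expression>
#   -> v1.10.0-alpha.1  (development build: the devel note is added, and gitref
#      is then reset to the commit hash after the '+')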
function curl-metadata() {
curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
}
function set-kube-env() {
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
until curl-metadata kube-env > "${kube_env_yaml}"; do
echo 'Waiting for kube-env...'
sleep 3
done
# kube-env has all the environment variables we care about, in a flat yaml format
eval "$(python -c '
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
print("""export {var}""".format(var = k))
' < """${kube_env_yaml}""")"
}
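# Illustration only: given a hypothetical flat kube-env payload such as
#   KUBERNETES_MASTER_NAME: kubernetes-master
#   NODE_INSTANCE_PREFIX: e2e-test-minion
# the eval above executes:
#   readonly KUBERNETES_MASTER_NAME=kubernetes-master
#   export KUBERNETES_MASTER_NAME
#   readonly NODE_INSTANCE_PREFIX=e2e-test-minion
#   export NODE_INSTANCE_PREFIX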
function remove-docker-artifacts() {
echo "== Deleting docker0 =="
apt-get-install bridge-utils
# Remove docker artifacts on minion nodes, if present
iptables -t nat -F || true
ifconfig docker0 down || true
brctl delbr docker0 || true
echo "== Finished deleting docker0 =="
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
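# Usage sketch (the hash and URLs are placeholders, not values from this commit):
#   download-or-bust "da39a3ee5e6b4b0d3255bfef95601890afd80709" \
#     "https://example.com/mirror-a/kubernetes-server-linux-amd64.tar.gz" \
#     "https://example.com/mirror-b/kubernetes-server-linux-amd64.tar.gz"
# An empty hash ("") accepts the first successful download without verification.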
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
apt-get-install() {
local -r packages=( $@ )
installed=true
for package in "${packages[@]}"; do
if ! dpkg -s "${package}" &>/dev/null; then
installed=false
break
fi
done
if [[ "${installed}" == "true" ]]; then
echo "== ${packages[@]} already installed, skipped apt-get install ${packages[@]} =="
return
fi
apt-get-update
# Forcibly install packages (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do
echo "== install of packages $@ failed, retrying =="
sleep 5
done
}
apt-get-update() {
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
}
# Restart any services that need restarting due to a library upgrade
# Uses needrestart
restart-updated-services() {
# We default to restarting services, because this is only done as part of an update
if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then
echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}"
return
fi
echo "Restarting services with updated libraries (needrestart -r a)"
# The pipes make sure that needrestart doesn't think it is running with a TTY
# Debian bug #803249; fixed but not necessarily in package repos yet
echo "" | needrestart -r a 2>&1 | tee /dev/null
}
# Reboot the machine if /var/run/reboot-required exists
reboot-if-required() {
if [[ ! -e "/var/run/reboot-required" ]]; then
return
fi
echo "Reboot is required (/var/run/reboot-required detected)"
if [[ -e "/var/run/reboot-required.pkgs" ]]; then
echo "Packages that triggered reboot:"
cat /var/run/reboot-required.pkgs
fi
# We default to rebooting the machine because this is only done as part of an update
if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then
echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}"
return
fi
rm -f /var/run/reboot-required
rm -f /var/run/reboot-required.pkgs
echo "Triggering reboot"
init 6
}
# Install upgrades using unattended-upgrades, then reboot or restart services
auto-upgrade() {
# We default to not installing upgrades
if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then
echo "AUTO_UPGRADE not set to true; won't auto-upgrade"
return
fi
apt-get-install unattended-upgrades needrestart
unattended-upgrade --debug
reboot-if-required # We may reboot the machine right here
restart-updated-services
}
#
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
if dpkg -s salt-minion &>/dev/null; then
echo "== SaltStack already installed, skipping install step =="
return
fi
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
DEBS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
)
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for deb in "${DEBS[@]}"; do
if [ ! -e "${deb}" ]; then
download-or-bust "" "${URL_BASE}/${deb}"
fi
done
# Based on
# https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
# We do this to prevent Salt from starting the salt-minion
# daemon. The other packages don't have relevant daemons. (If you
# add a package that needs a daemon started, add it to a different
# list.)
cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
echo "Salt shall not start." >&2
exit 101
EOF
chmod 0755 /usr/sbin/policy-rc.d
for deb in "${DEBS[@]}"; do
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
dpkg --skip-same-version --force-depends -i "${deb}"
done
# This will install any of the unmet dependencies from above.
echo "== Installing unmet dependencies =="
until apt-get install -f -y; do
echo "== apt-get install failed, retrying =="
sleep 5
done
rm /usr/sbin/policy-rc.d
# Log a timestamp
echo "== Finished installing Salt =="
}
# Ensure salt-minion isn't running and never runs
stop-salt-minion() {
if [[ -e /etc/init/salt-minion.override ]]; then
# Assume this has already run (upgrade, or baked into containervm)
return
fi
# This ensures it on next reboot
echo manual > /etc/init/salt-minion.override
update-rc.d salt-minion disable
while service salt-minion status >/dev/null; do
echo "salt-minion found running, stopping"
service salt-minion stop
sleep 1
done
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
find-master-pd() {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
function create-salt-pillar() {
# Always overwrite the cluster-params.sls (even on a push, we have
# these variables)
mkdir -p /srv/salt-overlay/pillar
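# Every interpolated value below is passed through sed "s/'/''/g", which
# doubles single quotes (the escape rule for single-quoted YAML scalars), so
# arbitrary values cannot break out of the quoting in the generated sls file.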
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")'
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")'
enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")'
enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")'
enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")'
enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
cluster_dns_core_dns: '$(echo "$CLUSTER_DNS_CORE_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")'
hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")'
softlockup_panic: '$(echo "$SOFTLOCKUP_PANIC" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")'
enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")'
initial_etcd_cluster_state: '$(echo "${INITIAL_ETCD_CLUSTER_STATE:-}" | sed -e "s/'/''/g")'
ca_cert_bundle_path: '$(echo "${CA_CERT_BUNDLE_PATH:-}" | sed -e "s/'/''/g")'
hostname: '$(echo "${ETCD_HOSTNAME:-$(hostname -s)}" | sed -e "s/'/''/g")'
enable_pod_priority: '$(echo "${ENABLE_POD_PRIORITY:-}" | sed -e "s/'/''/g")'
enable_default_storage_class: '$(echo "$ENABLE_DEFAULT_STORAGE_CLASS" | sed -e "s/'/''/g")'
kube_proxy_daemonset: '$(echo "$KUBE_PROXY_DAEMONSET" | sed -e "s/'/''/g")'
EOF
if [ -n "${STORAGE_BACKEND:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
admission-control-config-file: /etc/admission_controller.config
EOF
fi
if [ -n "${KUBELET_PORT:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_IMAGE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_tag: '$(echo "$ETCD_IMAGE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_repository: '$(echo "$ETCD_DOCKER_REPOSITORY" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ETCD_VERSION:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_version: '$(echo "$ETCD_VERSION" | sed -e "s/'/''/g")'
EOF
fi
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_over_ssl: 'true'
EOF
else
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_over_ssl: 'false'
EOF
fi
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_quorum_read: '$(echo "${ETCD_QUORUM_READ}" | sed -e "s/'/''/g")'
EOF
fi
# Configuration changes for test clusters
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
# TODO: Replace this with a persistent volume (and create it).
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
cluster_registry_disk_type: gce
cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g")
cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g")
EOF
fi
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NODE_LABELS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
non_master_node_labels: '$(echo "${NON_MASTER_NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${NODE_TAINTS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_taints: '$(echo "${NODE_TAINTS}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${EVICTION_HARD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
fi
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")'
autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")'
autoscaler_expander_config: '$(echo "${AUTOSCALER_EXPANDER_CONFIG}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ENABLE_IP_ALIASES:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")'
EOF
fi
}
# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
# 1. Convert from [0-9]+X?i?B into [0-9]+X? (X denotes the prefix, ? means the field
# is optional).
# 2. Attach an 'i' to the end of the string if we find a letter.
# The two-step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
function convert-bytes-gce-kube() {
local -r storage_space=$1
echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}
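# Expected behaviour (illustrative):
#   convert-bytes-gce-kube "5B"     -> 5     (raw bytes: no prefix, no 'i' appended)
#   convert-bytes-gce-kube "200GB"  -> 200Gi (step 1 gives 200G, step 2 appends 'i')
#   convert-bytes-gce-kube "10KiB"  -> 10Ki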
# This should happen both on cluster initialization and node upgrades.
#
# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
# connect to the apiserver.
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: ${KUBELET_CERT_PATH}
client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
cluster:
server: https://${KUBERNETES_MASTER_NAME}
certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
)
fi
}
# This should happen both on cluster initialization and node upgrades.
#
# - When run as static pods, use the CA_CERT and KUBE_PROXY_TOKEN to generate a
# kubeconfig file for the kube-proxy to securely connect to the apiserver.
function create-salt-kubeproxy-auth() {
local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-proxy
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
)
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLy have no excuse not to do the reboot
# optimization.
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar (${server_binary_tar_urls[@]})"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
local -r salt_tar_urls=( $(split-commas "${SALT_TAR_URL}") )
local -r salt_tar="${salt_tar_urls[0]##*/}"
if [[ -n "${SALT_TAR_HASH:-}" ]]; then
local -r salt_tar_hash="${SALT_TAR_HASH}"
else
echo "Downloading Salt tar sha1 (not found in env)"
download-or-bust "" "${salt_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r salt_tar_hash=$(cat "${salt_tar}.sha1")
fi
echo "Downloading Salt tar (${salt_tar_urls[@]})"
download-or-bust "${salt_tar_hash}" "${salt_tar_urls[@]}"
echo "Unpacking Salt tree and checking integrity of binary release tar"
rm -rf kubernetes
tar xzf "${salt_tar}" && tar tzf "${server_binary_tar}" > /dev/null
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running release install script"
kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
}
function fix-apt-sources() {
sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
}
function salt-run-local() {
cat <<EOF >/etc/salt/minion.d/local.conf
file_client: local
file_roots:
base:
- /srv/salt
EOF
}
function salt-debug-log() {
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
}
function salt-node-role() {
local -r kubelet_bootstrap_kubeconfig="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig"
local -r kubelet_kubeconfig="/srv/salt-overlay/salt/kubelet/kubeconfig"
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
cloud: gce
api_servers: '${KUBERNETES_MASTER_NAME}'
kubelet_bootstrap_kubeconfig: /var/lib/kubelet/bootstrap-kubeconfig
kubelet_kubeconfig: /var/lib/kubelet/kubeconfig
EOF
}
function env-to-grains {
local key=$1
local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
local value=${!env_key:-}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
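# Usage sketch (the value is hypothetical): with DOCKER_OPTS="--log-level=warn",
#   env-to-grains "docker_opts"
# appends the following grain to /etc/salt/minion.d/grains.conf:
#   docker_opts: '--log-level=warn'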
function node-docker-opts() {
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}"
fi
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
fi
}
function salt-grains() {
env-to-grains "docker_opts"
env-to-grains "docker_root"
env-to-grains "kubelet_root"
env-to-grains "feature_gates"
}
function configure-salt() {
mkdir -p /etc/salt/minion.d
salt-run-local
salt-node-role
node-docker-opts
salt-grains
install-salt
stop-salt-minion
}
function run-salt() {
echo "== Calling Salt =="
local rc=0
for i in {0..6}; do
salt-call --retcode-passthrough --local state.highstate && rc=0 || rc=$?
if [[ "${rc}" == 0 ]]; then
return 0
fi
done
echo "Salt failed to run repeatedly" >&2
return "${rc}"
}
function run-user-script() {
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
fi
if [[ ! -z ${user_script:-} ]]; then
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
echo "== running user startup script =="
"${INSTALL_DIR}/k8s-user-script.sh"
fi
}
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
echo "Support for debian master has been removed"
exit 1
fi
if [[ -z "${is_push}" ]]; then
echo "== kube-up node config starting =="
set-broken-motd
ensure-basic-networking
fix-apt-sources
ensure-install-dir
ensure-packages
set-kube-env
auto-upgrade
ensure-local-disks
create-node-pki
create-salt-pillar
create-salt-kubelet-auth
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-salt-kubeproxy-auth
fi
download-release
configure-salt
remove-docker-artifacts
config-ip-firewall
run-salt
reset-motd
run-user-script
echo "== kube-up node config done =="
else
echo "== kube-push node config starting =="
ensure-basic-networking
ensure-install-dir
set-kube-env
create-salt-pillar
download-release
reset-motd
run-salt
echo "== kube-push node config done =="
fi

View File

@ -1,8 +0,0 @@
approvers:
- euank
- yifan-gu
- ethernetdan
reviewers:
- euank
- yifan-gu
- ethernetdan

View File

@ -1,8 +0,0 @@
# Container Linux image
The [Container Linux Operating System](https://coreos.com/why/) is a Linux distribution optimized for running containers securely at scale.
CoreOS provides [a Container Linux image](https://coreos.com/os/docs/latest/booting-on-google-compute-engine.html) for Google Cloud Platform (GCP).
This folder contains configuration and tooling to allow kube-up to create a Kubernetes cluster on Google Cloud Platform running on the official Container Linux image.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/gce/container-linux/README.md?pixel)]()

File diff suppressed because it is too large

View File

@ -1,182 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function download-kube-env {
# Fetch kube-env from GCE metadata server.
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
sed 's/: /=/' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env"
rm -f "${tmp_kube_env}"
}
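# Illustration (hypothetical kube-env line): the sed above rewrites each
# "KEY: value" pair into a shell-style assignment, e.g.
#   KUBERNETES_MASTER: true   becomes   KUBERNETES_MASTER=true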
function validate-hash {
local -r file="$1"
local -r expected="$2"
actual=$(sha1sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
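# Usage sketch (hypothetical file name and digest):
#   validate-hash "kubernetes-server-linux-amd64.tar.gz" \
#     "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12"
# returns 1 and logs a corruption message unless sha1sum reports that digest.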
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
function download-or-bust {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
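# Usage sketch (hypothetical URL; pass "" as the hash to skip verification):
#   download-or-bust "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12" \
#     "https://storage.googleapis.com/example-bucket/kube.tar.gz"
# cycles through the URLs until one downloads and validates successfully.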
function split-commas {
echo "$1" | tr ',' '\n'
}
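# Example: split-commas "https://a/x.tar.gz,https://b/x.tar.gz" prints one URL
# per line; callers capture the result into an array with $( ... ).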
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
# and places them into suitable directories. Files are placed in /opt/kubernetes.
function install-kube-binary-config {
cd "${KUBE_HOME}"
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
src_dir="${KUBE_HOME}/kubernetes/server/bin"
dst_dir="${KUBE_HOME}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
fi
local -r kube_bin="${KUBE_HOME}/bin"
mv "${src_dir}/kubelet" "${kube_bin}"
mv "${src_dir}/kubectl" "${kube_bin}"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
local -r cni_version="v0.6.0"
local -r cni_tar="cni-plugins-amd64-${cni_version}.tgz"
local -r cni_sha1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
local -r cni_dir="${KUBE_HOME}/cni"
mkdir -p "${cni_dir}/bin"
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite
mv "${cni_dir}/bin"/* "${kube_bin}"
rmdir "${cni_dir}/bin"
rm -f "${KUBE_HOME}/${cni_tar}"
fi
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
dst_dir="${KUBE_HOME}/kube-manifests"
mkdir -p "${dst_dir}"
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh"
chmod -R 755 "${kube_bin}"
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
rm -f "${KUBE_HOME}/${manifests_tar}"
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
KUBE_HOME="/opt/kubernetes"
mkdir -p "${KUBE_HOME}"
download-kube-env
source "${KUBE_HOME}/kube-env"
install-kube-binary-config
echo "Done for installing kubernetes files"
# On Container Linux, the hosts file lives at /usr/share/baselayout/hosts,
# so we need to manually populate /etc/hosts here on GCE.
echo "127.0.0.1 localhost" >> /etc/hosts
echo "::1 localhost" >> /etc/hosts
echo "Configuring hostname"
hostnamectl set-hostname "$(hostname | cut -f1 -d.)"

View File

@ -1,83 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for master and node instance health monitoring, which is
# packaged in the kube-manifests tarball. It is executed through a systemd service
# in cluster/gce/gci/<master/node>.yaml. The env variables come from an env
# file provided by the systemd service.
set -o nounset
set -o pipefail
# We simply kill the process when there is a failure. Another systemd service will
# automatically restart the process.
function docker_monitoring {
while true; do
if ! timeout 60 docker ps > /dev/null; then
echo "Docker daemon failed!"
pkill docker
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 30
else
sleep "${SLEEP_SECONDS}"
fi
done
}
function kubelet_monitoring {
echo "Wait for 2 minutes for kubelet to be fuctional"
# TODO(andyzheng0831): replace it with a more reliable method if possible.
sleep 120
local -r max_seconds=10
local output=""
while true; do
if ! output=$(curl --insecure -m "${max_seconds}" -f -s -S https://127.0.0.1:${KUBELET_PORT:-10250}/healthz 2>&1); then
# Print the response and/or errors.
echo "${output}"
echo "Kubelet is unhealthy!"
pkill kubelet
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 60
else
sleep "${SLEEP_SECONDS}"
fi
done
}
############## Main Function ################
if [[ "$#" -ne 1 ]]; then
echo "Usage: health-monitor.sh <docker/kubelet>"
exit 1
fi
KUBE_ENV="/home/kubernetes/kube-env"
if [[ ! -e "${KUBE_ENV}" ]]; then
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
exit 1
fi
SLEEP_SECONDS=10
component=$1
echo "Start kubernetes health monitoring for ${component}"
source "${KUBE_ENV}"
if [[ "${component}" == "docker" ]]; then
docker_monitoring
elif [[ "${component}" == "kubelet" ]]; then
kubelet_monitoring
else
echo "Health monitoring for component "${component}" is not supported!"
fi

View File

@ -1,19 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Container Linux distro.
# This file intentionally left blank

View File

@ -1,139 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Container Linux distro.
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
function create-master-instance {
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
function replicate-master-instance() {
local existing_master_zone="${1}"
local existing_master_name="${2}"
local existing_master_replicas="${3}"
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
create-master-instance-internal "${REPLICA_NAME}"
}
function create-master-instance-internal() {
local gcloud="gcloud"
local retries=5
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
"${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
for attempt in $(seq 1 ${retries}); do
if result=$(${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \
${preemptible_master} \
${network} 2>&1); then
echo "${result}" >&2
return 0
else
echo "${result}" >&2
if [[ ! "${result}" =~ "try again later" ]]; then
echo "Failed to create master instance due to non-retryable error" >&2
return 1
fi
sleep 10
fi
done
echo "Failed to create master instance despite ${retries} attempts" >&2
return 1
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null
}
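# Usage sketch (hypothetical zone and instance name):
#   get-metadata "us-central1-b" "kubernetes-master" "kube-env"
# SSHes into the instance and prints its kube-env metadata value.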

View File

@ -1,57 +0,0 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-master-installation.service
command: start
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
# Use --retry-connrefused opt only if it's supported by curl.
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
ExecStart=/opt/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- name: kube-master-configuration.service
command: start
content: |
[Unit]
Description=Configure kubernetes master
After=kube-master-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
ExecStart=/opt/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- name: kubernetes.target
enable: true
command: start
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
- name: docker.service
drop-ins:
- name: "use-cgroupfs-driver.conf"
# This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
content: |
[Service]
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="

View File

@ -1,35 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Container Linux distro.
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
function get-node-instance-metadata {
local metadata=""
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml,"
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
echo "${metadata}"
}
# $1: template name (required).
function create-node-instance-template {
local template_name="$1"
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
# TODO(euank): We should include update-strategy here. We should also switch to ignition
}

View File

@ -1,57 +0,0 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-node-installation.service
command: start
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
# Use --retry-connrefused opt only if it's supported by curl.
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
ExecStart=/opt/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- name: kube-node-configuration.service
command: start
content: |
[Unit]
Description=Configure kubernetes node
After=kube-node-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
ExecStart=/opt/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- name: kubernetes.target
enable: true
command: start
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
- name: docker.service
drop-ins:
- name: "use-cgroupfs-driver.conf"
# This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
content: |
[Service]
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="

vendor/k8s.io/kubernetes/cluster/gce/custom generated vendored Symbolic link
View File

@ -0,0 +1 @@
gci

View File

@ -1,32 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Debian OS distro
function get-node-instance-metadata {
local metadata=""
metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh,"
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
echo "${metadata}"
}
# $1: template name (required)
function create-node-instance-template {
local template_name="$1"
prepare-startup-script
create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)"
}

View File

@ -77,7 +77,9 @@ function config-ip-firewall {
iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
fi
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
# If METADATA_CONCEALMENT_NO_FIREWALL is set, don't create a firewall rule on
# this node, because we don't expect the daemonset to run on it.
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
echo "Add rule for metadata concealment"
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
fi
@ -118,7 +120,7 @@ function get-local-disk-num() {
function safe-block-symlink(){
local device="${1}"
local symdir="${2}"
mkdir -p "${symdir}"
get-or-generate-uuid "${device}"
@ -192,11 +194,11 @@ function unique-uuid-bind-mount(){
# Trigger udev refresh so that newly formatted devices are propagated in by-uuid
udevadm control --reload-rules
udevadm trigger
udevadm settle
# grep the exact match of actual device, prevents substring matching
local myuuid=$(ls -l /dev/disk/by-uuid/ | grep "/${actual_device}$" | tr -s ' ' | cut -d ' ' -f 9)
# myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
if [[ -z "${myuuid}" ]]; then
echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
exit 2
@ -228,7 +230,7 @@ function mount-ext(){
local devicenum="${2}"
local interface="${3}"
local format="${4}"
if [[ -z "${devicenum}" ]]; then
echo "Failed to get the local disk number for device ${ssd}" >&2
@ -727,6 +729,7 @@ function create-master-audit-policy {
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"'
@ -992,6 +995,14 @@ current-context: kube-scheduler
EOF
}
function create-kubescheduler-policy-config {
echo "Creating kube-scheduler policy config file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/policy-config
${SCHEDULER_POLICY_CONFIG}
EOF
}
function create-node-problem-detector-kubeconfig {
echo "Creating node-problem-detector kubeconfig file"
mkdir -p /var/lib/node-problem-detector
@ -1061,23 +1072,11 @@ function assemble-docker-flags {
echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker
if [[ "${use_net_plugin}" == "true" ]]; then
# If using a network plugin, extend the docker configuration to always remove
# the network checkpoint to avoid corrupt checkpoints.
# (https://github.com/docker/docker/issues/18283).
echo "Extend the docker.service configuration to remove the network checkpiont"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/01network.conf
[Service]
ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network"
EOF
fi
# Ensure TasksMax is sufficient for docker.
# (https://github.com/kubernetes/kubernetes/issues/51977)
echo "Extend the docker.service configuration to set a higher pids limit"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/02tasksmax.conf
cat <<EOF >/etc/systemd/system/docker.service.d/01tasksmax.conf
[Service]
TasksMax=infinity
EOF
@ -1092,8 +1091,8 @@ EOF
function start-kubelet {
echo "Start kubelet"
local -r kubelet_cert_dir="/var/lib/kubelet/pki/"
mkdir -p "${kubelet_cert_dir}"
# TODO(#60123): The kubelet should create the cert-dir directory if it doesn't exist
mkdir -p /var/lib/kubelet/pki/
local kubelet_bin="${KUBE_HOME}/bin/kubelet"
local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
@ -1111,114 +1110,9 @@ function start-kubelet {
fi
fi
echo "Using kubelet binary at ${kubelet_bin}"
local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
flags+=" --allow-privileged=true"
flags+=" --cgroup-root=/"
flags+=" --cloud-provider=gce"
flags+=" --cluster-dns=${DNS_SERVER_IP}"
flags+=" --cluster-domain=${DNS_DOMAIN}"
flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
flags+=" --experimental-mounter-path=${CONTAINERIZED_MOUNTER_HOME}/mounter"
flags+=" --experimental-check-node-capabilities-before-mount=true"
flags+=" --cert-dir=${kubelet_cert_dir}"
if [[ -n "${KUBELET_PORT:-}" ]]; then
flags+=" --port=${KUBELET_PORT}"
fi
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
flags+=" --enable-debugging-handlers=false"
flags+=" --hairpin-mode=none"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
#TODO(mikedanese): allow static pods to start before creating a client
#flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
#flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --register-schedulable=false"
else
# Standalone mode (not widely used?)
flags+=" --pod-cidr=${MASTER_IP_RANGE}"
fi
else # For nodes
flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
flags+=" --enable-debugging-handlers=true"
flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
[[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
[[ "${HAIRPIN_MODE:-}" == "none" ]]; then
flags+=" --hairpin-mode=${HAIRPIN_MODE}"
fi
flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=${CA_CERT_BUNDLE_PATH}"
fi
# Network plugin
if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
flags+=" --cni-bin-dir=/home/kubernetes/bin"
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
# Calico uses CNI always.
# Keep KUBERNETES_PRIVATE_MASTER for backward compatibility.
# Note that network policy won't work for master node.
if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" || "${KUBERNETES_MASTER:-}" == "true" ]]; then
flags+=" --network-plugin=${NETWORK_PROVIDER}"
else
flags+=" --network-plugin=cni"
fi
else
# Otherwise use the configured value.
flags+=" --network-plugin=${NETWORK_PROVIDER}"
fi
fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
fi
# FlexVolume plugin
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
fi
if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
flags+=" --manifest-url=${MANIFEST_URL}"
flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
fi
if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
fi
local node_labels=""
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${KUBERNETES_MASTER:-}" != "true" ]]; then
# Add kube-proxy daemonset label to node to avoid situation during cluster
# upgrade/downgrade when there are two instances of kube-proxy running on a node.
node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
fi
if [[ -n "${NODE_LABELS:-}" ]]; then
node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
fi
if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${KUBERNETES_MASTER:-}" != "true" ]]; then
node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
fi
if [[ -n "${node_labels:-}" ]]; then
flags+=" --node-labels=${node_labels}"
fi
if [[ -n "${NODE_TAINTS:-}" ]]; then
flags+=" --register-with-taints=${NODE_TAINTS}"
fi
if [[ -n "${EVICTION_HARD:-}" ]]; then
flags+=" --eviction-hard=${EVICTION_HARD}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
flags+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then
flags+=" --rotate-certificates=true"
fi
if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
flags+=" --container-runtime=${CONTAINER_RUNTIME}"
fi
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then
flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
fi
local -r kubelet_env_file="/etc/default/kubelet"
echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"
echo "KUBELET_OPTS=\"${KUBELET_ARGS}\"" > "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
@ -1289,10 +1183,8 @@ function prepare-log-file {
function prepare-kube-proxy-manifest-variables {
local -r src_file=$1;
remove-salt-config-comments "${src_file}"
local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
local kube_docker_registry="gcr.io/google_containers"
local kube_docker_registry="k8s.gcr.io"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
kube_docker_registry=${KUBE_DOCKER_REGISTRY}
fi
@ -1302,6 +1194,9 @@ function prepare-kube-proxy-manifest-variables {
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then
params+=" --proxy-mode=ipvs --feature-gates=SupportIPVSProxyMode=true"
fi
params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
params+=" ${KUBEPROXY_TEST_ARGS}"
@ -1338,7 +1233,7 @@ function prepare-kube-proxy-manifest-variables {
function start-kube-proxy {
echo "Start kube-proxy static pod"
prepare-log-file /var/log/kube-proxy.log
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
prepare-kube-proxy-manifest-variables "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
@ -1353,6 +1248,7 @@ function start-kube-proxy {
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
local host_ip=$(python -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
@ -1376,12 +1272,12 @@ function prepare-etcd-manifest {
local -r temp_file="/tmp/$5"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
remove-salt-config-comments "${temp_file}"
sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
@ -1395,7 +1291,7 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}"
sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}@g" "${temp_file}"
else
sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
fi
@ -1456,6 +1352,8 @@ function start-etcd-servers {
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
# FLEXVOLUME_HOSTPATH_MOUNT
# FLEXVOLUME_HOSTPATH_VOLUME
function compute-master-manifest-variables {
CLOUD_CONFIG_OPT=""
CLOUD_CONFIG_VOLUME=""
@ -1465,10 +1363,17 @@ function compute-master-manifest-variables {
CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
fi
DOCKER_REGISTRY="gcr.io/google_containers"
DOCKER_REGISTRY="k8s.gcr.io"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
fi
FLEXVOLUME_HOSTPATH_MOUNT=""
FLEXVOLUME_HOSTPATH_VOLUME=""
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true},"
FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
fi
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
@ -1484,17 +1389,6 @@ function prepare-mounter-rootfs {
cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}
# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
function remove-salt-config-comments {
# Remove salt configuration.
sed -i "/^[ |\t]*{[#|%]/d" $1
# Remove comments.
sed -i "/^[ |\t]*#/d" $1
}
# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
@ -1520,6 +1414,7 @@ function start-kube-apiserver {
params+=" --secure-port=443"
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"
if [[ -s "${REQUESTHEADER_CA_CERT_PATH:-}" ]]; then
params+=" --requestheader-client-ca-file=${REQUESTHEADER_CA_CERT_PATH}"
params+=" --requestheader-allowed-names=aggregator"
@ -1547,6 +1442,9 @@ function start-kube-apiserver {
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]]; then
params+=" --etcd-compaction-interval=${ETCD_COMPACTION_INTERVAL_SEC}s"
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then
params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s"
fi
@ -1569,6 +1467,11 @@ function start-kube-apiserver {
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
if [[ -n "${SERVICEACCOUNT_ISSUER:-}" ]]; then
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
params+=" --service-account-api-audiences=${SERVICEACCOUNT_API_AUDIENCES}"
fi
local audit_policy_config_mount=""
local audit_policy_config_volume=""
@ -1676,11 +1579,9 @@ function start-kube-apiserver {
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
if [[ -n "${PROXY_SSH_USER:-}" ]]; then
params+=" --advertise-address=${vm_external_ip}"
params+=" --advertise-address=${vm_external_ip}"
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
else
params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
fi
elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then
params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
@ -1692,10 +1593,13 @@ function start-kube-apiserver {
params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config"
webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},"
webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}},"
if [[ -n "${GCP_AUTHN_CACHE_TTL:-}" ]]; then
params+=" --authentication-token-webhook-cache-ttl=${GCP_AUTHN_CACHE_TTL}"
fi
fi
local authorization_mode="Node,RBAC"
local authorization_mode="RBAC"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
# Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false
@ -1704,7 +1608,6 @@ function start-kube-apiserver {
# Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
remove-salt-config-comments "${abac_policy_json}"
if [[ -n "${KUBE_USER:-}" ]]; then
sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
else
@ -1720,11 +1623,18 @@ function start-kube-apiserver {
local webhook_config_mount=""
local webhook_config_volume=""
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
authorization_mode+=",Webhook"
authorization_mode="Webhook,${authorization_mode}"
params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}},"
if [[ -n "${GCP_AUTHZ_CACHE_AUTHORIZED_TTL:-}" ]]; then
params+=" --authorization-webhook-cache-authorized-ttl=${GCP_AUTHZ_CACHE_AUTHORIZED_TTL}"
fi
if [[ -n "${GCP_AUTHZ_CACHE_UNAUTHORIZED_TTL:-}" ]]; then
params+=" --authorization-webhook-cache-unauthorized-ttl=${GCP_AUTHZ_CACHE_UNAUTHORIZED_TTL}"
fi
fi
authorization_mode="Node,${authorization_mode}"
params+=" --authorization-mode=${authorization_mode}"
local container_env=""
@ -1748,7 +1658,6 @@ function start-kube-apiserver {
fi
src_file="${src_dir}/kube-apiserver.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -1828,7 +1737,7 @@ function start-kube-controller-manager {
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=CloudAllocator"
params+=" --cidr-allocator-type=${NODE_IPAM_MODE}"
params+=" --configure-cloud-routes=false"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
@ -1858,7 +1767,6 @@ function start-kube-controller-manager {
fi
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
@ -1871,6 +1779,9 @@ function start-kube-controller-manager {
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@ -1894,11 +1805,15 @@ function start-kube-scheduler {
if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
fi
if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then
create-kubescheduler-policy-config
params+=" --use-legacy-policy-config"
params+=" --policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config"
fi
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
remove-salt-config-comments "${src_file}"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -1919,7 +1834,6 @@ function start-cluster-autoscaler {
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -1935,7 +1849,7 @@ function start-cluster-autoscaler {
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: (optional) auxilary manifest source dir
# $3: (optional) auxiliary manifest source dir
function setup-addon-manifests {
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/$1/$2"
@ -1952,6 +1866,33 @@ function setup-addon-manifests {
fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
# manifests directory.
function download-extra-addons {
local -r out_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/gce-extras"
mkdir -p "${out_dir}"
local curl_cmd=(
"curl"
"--fail"
"--retry" "5"
"--retry-delay" "3"
"--silent"
"--show-error"
)
if [[ -n "${CURL_RETRY_CONNREFUSED:-}" ]]; then
curl_cmd+=("${CURL_RETRY_CONNREFUSED}")
fi
if [[ -n "${EXTRA_ADDONS_HEADER:-}" ]]; then
curl_cmd+=("-H" "${EXTRA_ADDONS_HEADER}")
fi
curl_cmd+=("-o" "${out_dir}/extras.json")
curl_cmd+=("${EXTRA_ADDONS_URL}")
"${curl_cmd[@]}"
}
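# Sketch of the assembled invocation (hypothetical URL, no extra header):
#   curl --fail --retry 5 --retry-delay 3 --silent --show-error \
#     -o "${out_dir}/extras.json" "https://example.com/extra-addons.json"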
# A helper function for copying manifests and setting dir/files
# permissions.
#
@ -1980,50 +1921,63 @@ function copy-manifests {
chmod 644 "${dst_dir}"/*
}
# Fluentd manifest is modified using kubectl, which may not be available at
# this point. Run this as a background process.
# Fluentd resources are modified using ScalingPolicy CR, which may not be
# available at this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
local -r fluentd_gcp_yaml="${1}"
local modifying_flags=""
local any_overrides=false
if [[ -n "${FLUENTD_GCP_MEMORY_LIMIT:-}" ]]; then
modifying_flags="${modifying_flags} --limits=memory=${FLUENTD_GCP_MEMORY_LIMIT}"
any_overrides=true
fi
local request_resources=""
if [[ -n "${FLUENTD_GCP_CPU_REQUEST:-}" ]]; then
request_resources="cpu=${FLUENTD_GCP_CPU_REQUEST}"
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_MEMORY_REQUEST:-}" ]]; then
if [[ -n "${request_resources}" ]]; then
request_resources="${request_resources},"
fi
request_resources="memory=${FLUENTD_GCP_MEMORY_REQUEST}"
any_overrides=true
fi
if [[ -n "${request_resources}" ]]; then
modifying_flags="${modifying_flags} --requests=${request_resources}"
if ! $any_overrides; then
# Nothing to do here.
exit
fi
until kubectl get nodes
# Wait until ScalingPolicy CRD is in place.
until kubectl get scalingpolicies.scalingpolicy.kope.io
do
sleep 10
done
local -r temp_fluentd_gcp_yaml="${fluentd_gcp_yaml}.tmp"
if kubectl set resources --dry-run --local -f ${fluentd_gcp_yaml} ${modifying_flags} \
--containers=fluentd-gcp -o yaml > ${temp_fluentd_gcp_yaml}; then
mv ${temp_fluentd_gcp_yaml} ${fluentd_gcp_yaml}
else
(echo "Failed to update fluentd resources. Used manifest:" && cat ${temp_fluentd_gcp_yaml}) >&2
rm ${temp_fluentd_gcp_yaml}
fi
# Single-shot, not managed by addon manager. Can be later modified or removed
# at will.
cat <<EOF | kubectl apply -f -
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
name: fluentd-gcp-scaling-policy
namespace: kube-system
spec:
containers:
- name: fluentd-gcp
resources:
requests:
- resource: cpu
base: ${FLUENTD_GCP_CPU_REQUEST:-}
- resource: memory
base: ${FLUENTD_GCP_MEMORY_REQUEST:-}
limits:
- resource: memory
base: ${FLUENTD_GCP_MEMORY_LIMIT:-}
EOF
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
local -r fluentd_gcp_yaml="${1}"
wait-for-apiserver-and-update-fluentd &
}
wait-for-apiserver-and-update-fluentd ${fluentd_gcp_yaml} &
# Update {{ container_runtime }} with the actual container runtime name.
function update-container-runtime {
local -r configmap_yaml="$1"
sed -i -e "s@{{ *container_runtime *}}@${CONTAINER_RUNTIME_NAME:-docker}@g" "${configmap_yaml}"
}
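# Illustration: with CONTAINER_RUNTIME_NAME unset, a configmap line containing
#   {{ container_runtime }}
# is rewritten to the default runtime name "docker".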
# Updates parameters in yaml file for prometheus-to-sd configuration, or
@ -2038,6 +1992,11 @@ function update-prometheus-to-sd-parameters {
fi
}
# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
sed -i -e "s@{{ *event_exporter_zone *}}@${ZONE:-}@g" "$1"
}
# Sets up the manifests of coreDNS for k8s addons.
function setup-coredns-manifest {
local -r coredns_file="${dst_dir}/dns/coredns.yaml"
@ -2064,7 +2023,7 @@ EOF
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler"
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
fi
}
@ -2128,7 +2087,6 @@ EOF
else
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
@ -2175,31 +2133,26 @@ EOF
setup-kube-dns-manifest
fi
fi
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
setup-addon-manifests "addons" "registry"
local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}"
mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}"
# Replace the salt configurations with variable values.
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}"
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
setup-addon-manifests "addons" "fluentd-elasticsearch"
local -r fluentd_es_configmap_yaml="${dst_dir}/fluentd-elasticsearch/fluentd-es-configmap.yaml"
update-container-runtime ${fluentd_es_configmap_yaml}
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
update-event-exporter ${event_exporter_yaml}
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${event_exporter_yaml}
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
@ -2212,7 +2165,7 @@ EOF
setup-addon-manifests "addons" "node-problem-detector/standalone" "node-problem-detector"
fi
if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
setup-addon-manifests "admission-controls" "limit-range"
setup-addon-manifests "admission-controls" "limit-range" "gce"
fi
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
@ -2232,6 +2185,17 @@ EOF
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
setup-addon-manifests "addons" "istio/auth"
else
setup-addon-manifests "addons" "istio/noauth"
fi
fi
if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
download-extra-addons
setup-addon-manifests "addons" "gce-extras"
fi
# Place addon manager pod manifest.
cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
@ -2244,14 +2208,11 @@ function start-image-puller {
/etc/kubernetes/manifests/
}
# Starts kube-registry proxy
function start-kube-registry-proxy {
echo "Start kube-registry-proxy"
cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}
# Starts an L7 load-balancing controller for ingress.
# Sets up manifests for the ingress controller and GCE-specific policies for the service controller.
function start-lb-controller {
setup-addon-manifests "addons" "loadbalancing"
# Starts an L7 load-balancing controller for ingress.
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
@ -2345,7 +2306,7 @@ spec:
- name: vol
containers:
- name: pv-recycler
image: gcr.io/google_containers/busybox:1.27
image: k8s.gcr.io/busybox:1.27
command:
- /bin/sh
args:
@ -2433,10 +2394,6 @@ else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
# Kube-registry-proxy.
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
start-kube-registry-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi

View File

@ -54,6 +54,7 @@ EOF
function download-kube-env {
# Fetch kube-env from GCE metadata server.
(umask 700;
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
@ -66,10 +67,12 @@ for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env")
rm -f "${tmp_kube_env}"
)
}
function download-kube-master-certs {
# Fetch kube-env from GCE metadata server.
(umask 700;
local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
@ -82,6 +85,7 @@ for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs")
rm -f "${tmp_kube_master_certs}"
)
}
function validate-hash {
@ -134,6 +138,13 @@ function split-commas {
echo "$1" | tr ',' '\n'
}
function remount-flexvolume-directory {
local -r flexvolume_plugin_dir=$1
mkdir -p "${flexvolume_plugin_dir}"
mount --bind "${flexvolume_plugin_dir}" "${flexvolume_plugin_dir}"
mount -o remount,exec "${flexvolume_plugin_dir}"
}
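# Usage sketch (hypothetical plugin dir): the bind mount plus remount,exec
# lets FlexVolume driver binaries execute from an otherwise noexec partition:
#   remount-flexvolume-directory "/home/kubernetes/flexvolume"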
function install-gci-mounter-tools {
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
local -r mounter_tar_sha="${DEFAULT_MOUNTER_TAR_SHA}"
@ -223,12 +234,12 @@ function install-kube-manifests {
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}"
if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"
@ -332,6 +343,11 @@ function install-kube-binary-config {
# Install gci mounter related artifacts to allow mounting storage volumes in GCI
install-gci-mounter-tools
# Remount the Flexvolume directory with the "exec" option, if needed.
if [[ "${REMOUNT_VOLUME_PLUGIN_DIR:-}" == "true" && -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
remount-flexvolume-directory "${VOLUME_PLUGIN_DIR}"
fi
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"

View File

@ -32,7 +32,7 @@ set -o pipefail
MOUNTER_IMAGE=${1:-}
MOUNTER_PATH=/home/kubernetes/flexvolume_mounter
VOLUME_PLUGIN_DIR=/etc/srv/kubernetes/kubelet-plugins/volume/exec
VOLUME_PLUGIN_DIR=/home/kubernetes/flexvolume
usage() {
echo "usage: $0 imagename[:tag]"

View File

@ -94,13 +94,21 @@ function create-master-instance-internal() {
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
local enable_ip_aliases
if [[ "${NODE_IPAM_MODE:-}" == "CloudAllocator" ]]; then
enable_ip_aliases=true
else
enable_ip_aliases=false
fi
local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
"${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}")
"${address:-}" "${enable_ip_aliases:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh"
metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
metadata="${metadata},gci-update-strategy=${KUBE_TEMP}/gci-update.txt"
metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt"

View File

@ -8,8 +8,7 @@ load(
go_binary(
name = "mounter",
importpath = "k8s.io/kubernetes/cluster/gce/gci/mounter",
library = ":go_default_library",
embed = [":go_default_library"],
)
go_library(

View File

@ -13,7 +13,7 @@
# limitations under the License.
TAG=v2
REGISTRY=gcr.io/google_containers
REGISTRY=staging-k8s.gcr.io
IMAGE=gci-mounter
all: container
@ -22,7 +22,7 @@ container:
docker build --pull -t ${REGISTRY}/${IMAGE}:${TAG} .
push:
gcloud docker -- push ${REGISTRY}/${IMAGE}:${TAG}
docker push ${REGISTRY}/${IMAGE}:${TAG}
upload:
./stage-upload.sh ${TAG} ${REGISTRY}/${IMAGE}:${TAG}

View File

@ -22,6 +22,7 @@ function get-node-instance-metadata {
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml,"
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,"
metadata+="cluster-location=${KUBE_TEMP}/cluster-location.txt,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt,"
metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt,"
metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,"

View File

@ -0,0 +1,7 @@
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"{{kube_user}}", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kube_proxy", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubecfg", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"client", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}

@ -0,0 +1,93 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "cluster-autoscaler",
"namespace": "kube-system",
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
}
},
"spec": {
"hostNetwork": true,
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.1.1",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
"port": 8085
},
"initialDelaySeconds": 600,
"periodSeconds": 60
},
"command": [
"./run.sh",
"--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
"--balance-similar-node-groups=true",
"{{params}}"
],
"env": [
{
"name": "LOG_OUTPUT",
"value": "/var/log/cluster-autoscaler.log"
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "300Mi"
}
},
"volumeMounts": [
{{cloud_config_mount}}
{
"name": "ssl-certs",
"readOnly": true,
"mountPath": "/etc/ssl/certs"
},
{
"name": "usrsharecacerts",
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "logfile",
"mountPath": "/var/log/cluster-autoscaler.log",
"readOnly": false
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent"
}
],
"volumes": [
{{cloud_config_volume}}
{
"name": "ssl-certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
},
{
"name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"
}
},
{
"name": "logfile",
"hostPath": {
"path": "/var/log/cluster-autoscaler.log",
"type": "FileOrCreate"
}
}
],
"restartPolicy": "Always"
}
}

@ -0,0 +1,111 @@
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
name: e2e-image-puller
namespace: kube-system
labels:
name: e2e-image-puller
spec:
containers:
- name: image-puller
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/busybox:1.24
# TODO: Replace this with a go script that pulls in parallel?
# Currently it takes ~5m to pull all e2e images, so this is OK, and
# fewer moving parts is always better.
# TODO: Replace the hardcoded image list with an autogen list; the list is
# currently hard-coded for static verification. It was generated via:
# grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
# sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
# We always want the subshell to exit 0 so this pod doesn't end up
# blocking tests in an Error state.
command:
- /bin/sh
- -c
- >
for i in
k8s.gcr.io/alpine-with-bash:1.0
k8s.gcr.io/apparmor-loader:0.1
k8s.gcr.io/busybox:1.24
k8s.gcr.io/dnsutils:e2e
k8s.gcr.io/e2e-net-amd64:1.0
k8s.gcr.io/echoserver:1.6
k8s.gcr.io/eptest:0.1
k8s.gcr.io/fakegitserver:0.1
k8s.gcr.io/galera-install:0.1
k8s.gcr.io/invalid-image:invalid-tag
k8s.gcr.io/iperf:e2e
k8s.gcr.io/jessie-dnsutils:e2e
k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
k8s.gcr.io/liveness:e2e
k8s.gcr.io/logs-generator:v0.1.0
k8s.gcr.io/mounttest:0.8
k8s.gcr.io/mounttest-user:0.5
k8s.gcr.io/mysql-galera:e2e
k8s.gcr.io/mysql-healthz:1.0
k8s.gcr.io/netexec:1.4
k8s.gcr.io/netexec:1.5
k8s.gcr.io/netexec:1.7
k8s.gcr.io/nettest:1.7
k8s.gcr.io/nginx:1.7.9
k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/node-problem-detector:v0.3.0
k8s.gcr.io/pause
k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
k8s.gcr.io/portforwardtester:1.2
k8s.gcr.io/redis-install-3.2.0:e2e
k8s.gcr.io/resource_consumer:beta4
k8s.gcr.io/resource_consumer/controller:beta4
gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
gcr.io/kubernetes-e2e-test-images/hostexec-amd64:1.1
k8s.gcr.io/servicelb:0.1
k8s.gcr.io/test-webserver:e2e
k8s.gcr.io/update-demo:kitten
k8s.gcr.io/update-demo:nautilus
k8s.gcr.io/volume-ceph:0.1
k8s.gcr.io/volume-gluster:0.2
k8s.gcr.io/volume-iscsi:0.1
k8s.gcr.io/volume-nfs:0.8
k8s.gcr.io/volume-rbd:0.1
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run/docker.sock
name: socket
- mountPath: /usr/bin/docker
name: docker
# Add a container that runs a health-check
- name: nethealth-check
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/kube-nethealth-amd64:1.0
command:
- /bin/sh
- -c
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: /var/run/docker.sock
type: Socket
name: socket
- hostPath:
path: /usr/bin/docker
type: File
name: docker
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
hostNetwork: true
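The TODO above suggests replacing the serial pull loop with a parallel puller.
A minimal bash sketch under that assumption (image list truncated to two
entries from the manifest above):

  # Pull images concurrently; always exit 0 so the pod never lands in Error.
  for i in k8s.gcr.io/busybox:1.24 k8s.gcr.io/pause; do
    ( docker pull "$i" >/dev/null && echo "$(date '+%X') pulled $i" ) &
  done
  wait
  exit 0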

@ -0,0 +1,91 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server{{ suffix }}",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.14') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
}
},
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.2.14') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
},
{ "name": "INITIAL_CLUSTER",
"value": "{{ etcd_cluster }}"
}
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": {{ port }},
"path": "/health"
},
"initialDelaySeconds": {{ liveness_probe_initial_delay }},
"timeoutSeconds": 15
},
"ports": [
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
},
{ "name": "clientport",
"containerPort": {{ port }},
"hostPort": {{ port }}
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{ "name": "varlogetcd",
"mountPath": "/var/log/etcd{{ suffix }}.log",
"readOnly": false
},
{ "name": "etc",
"mountPath": "{{ srv_kube_path }}",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd{{ suffix }}.log",
"type": "FileOrCreate"}
},
{ "name": "etc",
"hostPath": {
"path": "{{ srv_kube_path }}"}
}
]
}}

@ -0,0 +1,56 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.8-alpha.2
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: gcp-lb-controller
version: v0.9.8-alpha.2
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.2
livenessProbe:
httpGet:
path: /healthz
port: 8086
scheme: HTTP
initialDelaySeconds: 30
# healthz reaches out to GCE
periodSeconds: 30
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 5
name: l7-lb-controller
volumeMounts:
- mountPath: /etc/gce.conf
name: cloudconfig
readOnly: true
- mountPath: /var/log/glbc.log
name: logfile
readOnly: false
resources:
# Request is set to accommodate this pod alongside the other
# master components on a single core master.
# TODO: Make resource requirements depend on the size of the cluster
requests:
cpu: 10m
memory: 50Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /glbc --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf
type: FileOrCreate
name: cloudconfig
- hostPath:
path: /var/log/glbc.log
type: FileOrCreate
name: logfile

@ -0,0 +1,38 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
# When updating version also bump it in:
# - test/kubemark/resources/manifests/kube-addon-manager.yaml
image: k8s.gcr.io/kube-addon-manager:v8.6
command:
- /bin/bash
- -c
- exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
volumes:
- hostPath:
path: /etc/kubernetes/
name: addons
- hostPath:
path: /var/log
name: varlog

@ -0,0 +1,136 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-apiserver"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-apiserver",
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
"resources": {
"requests": {
"cpu": "250m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 8080,
"path": "/healthz"
},
"initialDelaySeconds": {{liveness_probe_initial_delay}},
"timeoutSeconds": 15
},
"ports":[
{ "name": "https",
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{webhook_config_mount}}
{{webhook_authn_config_mount}}
{{audit_policy_config_mount}}
{{audit_webhook_config_mount}}
{{admission_controller_config_mount}}
{{image_policy_webhook_config_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
"readOnly": false},
{ "name": "auditlogfile",
"mountPath": "/var/log/kube-apiserver-audit.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/srv/pki",
"readOnly": true},
{ "name": "srvsshproxy",
"mountPath": "{{srv_sshproxy_path}}",
"readOnly": false}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{webhook_config_volume}}
{{webhook_authn_config_volume}}
{{audit_policy_config_volume}}
{{audit_webhook_config_volume}}
{{admission_controller_config_volume}}
{{image_policy_webhook_config_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-apiserver.log",
"type": "FileOrCreate"}
},
{ "name": "auditlogfile",
"hostPath": {
"path": "/var/log/kube-apiserver-audit.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/srv/pki"}
},
{ "name": "srvsshproxy",
"hostPath": {
"path": "{{srv_sshproxy_path}}"}
}
]
}}

@ -0,0 +1,105 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-controller-manager"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{pv_recycler_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{{flexvolume_hostpath_mount}}
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{pv_recycler_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{{flexvolume_hostpath}}
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki"}
}
]
}}

@ -0,0 +1,78 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
namespace: kube-system
# This annotation ensures that kube-proxy does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that kube-proxy runs as a static pod so this annotation does NOT have
# any effect on rescheduler (default scheduler and rescheduler are not
# involved in scheduling kube-proxy).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
tier: node
component: kube-proxy
spec:
{{pod_priority}}
hostNetwork: true
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
containers:
- name: kube-proxy
image: {{pillar['kube_docker_registry']}}/kube-proxy:{{pillar['kube-proxy_docker_tag']}}
resources:
requests:
cpu: {{ cpurequest }}
command:
- /bin/sh
- -c
- exec kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" --oom-score-adj=-998 {{params}} 1>>/var/log/kube-proxy.log 2>&1
{{container_env}}
{{kube_cache_mutation_detector_env_name}}
{{kube_cache_mutation_detector_env_value}}
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: etc-ssl-certs
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-ca-certs
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
- mountPath: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
readOnly: false
- mountPath: /run/xtables.lock
name: iptableslock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
volumes:
- hostPath:
path: /usr/share/ca-certificates
name: usr-ca-certs
- hostPath:
path: /etc/ssl/certs
name: etc-ssl-certs
- hostPath:
path: /var/lib/kube-proxy/kubeconfig
type: FileOrCreate
name: kubeconfig
- hostPath:
path: /var/log
name: varlog
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: iptableslock
- name: lib-modules
hostPath:
path: /lib/modules

@ -0,0 +1,64 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-scheduler"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-scheduler",
"image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
"resources": {
"requests": {
"cpu": "75m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10251,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{
"name": "logfile",
"mountPath": "/var/log/kube-scheduler.log",
"readOnly": false
},
{
"name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true
}
]
}
],
"volumes":[
{
"name": "srvkube",
"hostPath": {"path": "{{srv_kube_path}}"}
},
{
"name": "logfile",
"hostPath": {"path": "/var/log/kube-scheduler.log", "type": "FileOrCreate"}
}
]
}}

@ -0,0 +1,36 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.3.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.3.1
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.3.1
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 10m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
type: FileOrCreate
name: logfile

vendor/k8s.io/kubernetes/cluster/gce/upgrade-aliases.sh generated vendored Executable file

@ -0,0 +1,171 @@
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !!!EXPERIMENTAL!!! Upgrade a K8s cluster from routes to IP aliases for
# node connectivity on GCE. This is only for migration.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
echo "ERR: KUBERNETES_PROVIDER must be gce" >&2
exit 1
fi
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
# Print the number of routes used for K8s cluster node connectivity.
#
# Assumed vars:
# PROJECT
function get-k8s-node-routes-count() {
local k8s_node_routes_count=$(gcloud compute routes list \
--project=${PROJECT} --filter='description=k8s-node-route' \
--format='value(name)' | wc -l)
echo -n "${k8s_node_routes_count}"
}
# Detect the subnetwork where the K8s cluster resides.
#
# Assumed vars:
# KUBE_MASTER
# PROJECT
# ZONE
# Vars set:
# IP_ALIAS_SUBNETWORK
function detect-k8s-subnetwork() {
local subnetwork_url=$(gcloud compute instances describe \
${KUBE_MASTER} --project=${PROJECT} --zone=${ZONE} \
--format='value(networkInterfaces[0].subnetwork)')
if [ -n "${subnetwork_url}" ]; then
IP_ALIAS_SUBNETWORK="${subnetwork_url##*/}"
fi
}
# Set IP_ALIAS_SUBNETWORK's allowSubnetCidrRoutesOverlap to a boolean value.
# $1: true or false for the desired allowSubnetCidrRoutesOverlap.
#
# Assumed vars:
# IP_ALIAS_SUBNETWORK
# GCE_API_ENDPOINT
# PROJECT
# REGION
function set-allow-subnet-cidr-routes-overlap() {
local allow_subnet_cidr_routes_overlap
allow_subnet_cidr_routes_overlap=$(gcloud beta compute networks subnets \
describe ${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \
--format='value(allowSubnetCidrRoutesOverlap)')
local allow_overlap=$1
if [ "${allow_subnet_cidr_routes_overlap,,}" = "${allow_overlap}" ]; then
echo "Subnet ${IP_ALIAS_SUBNETWORK}'s allowSubnetCidrRoutesOverlap is already set as $1"
return
fi
echo "Setting subnet \"${IP_ALIAS_SUBNETWORK}\" allowSubnetCidrRoutesOverlap to $1"
local fingerprint=$(gcloud beta compute networks subnets describe \
${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \
--format='value(fingerprint)')
local access_token=$(gcloud auth print-access-token)
local request="{\"allowSubnetCidrRoutesOverlap\":$1, \"fingerprint\":\"${fingerprint}\"}"
local subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}"
until curl -s --header "Content-Type: application/json" --header "Authorization: Bearer ${access_token}" \
-X PATCH -d "${request}" "${subnetwork_url}" --output /dev/null; do
printf "."
sleep 1
done
}
# Add secondary ranges to K8s subnet.
#
# Assumed vars:
# IP_ALIAS_SUBNETWORK
# PROJECT
# REGION
# CLUSTER_IP_RANGE
# SERVICE_CLUSTER_IP_RANGE
function add-k8s-subnet-secondary-ranges() {
local secondary_ranges=$(gcloud beta compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \
--project="${PROJECT}" --region="${REGION}" \
--format='value(secondaryIpRanges)')
if [[ "${secondary_ranges}" =~ "pods-default" && "${secondary_ranges}" =~ "services-default" ]]; then
echo "${secondary_ranges} already contains both pods-default and services-default secondary ranges"
return
fi
echo "Adding secondary ranges: pods-default (${CLUSTER_IP_RANGE}), services-default (${SERVICE_CLUSTER_IP_RANGE})"
until gcloud beta compute networks subnets update ${IP_ALIAS_SUBNETWORK} \
--project=${PROJECT} --region=${REGION} \
--add-secondary-ranges="pods-default=${CLUSTER_IP_RANGE},services-default=${SERVICE_CLUSTER_IP_RANGE}"; do
printf "."
sleep 1
done
}
# Delete all K8s node routes.
#
# Assumed vars:
# PROJECT
function delete-k8s-node-routes() {
local -a routes
local -r batch=200
routes=( $(gcloud compute routes list \
--project=${PROJECT} --filter='description=k8s-node-route' \
--format='value(name)') )
while (( "${#routes[@]}" > 0 )); do
echo Deleting k8s node routes "${routes[*]::${batch}}"
gcloud compute routes delete --project "${PROJECT}" --quiet "${routes[@]::${batch}}"
routes=( "${routes[@]:${batch}}" )
done
}
detect-project
detect-master
k8s_node_routes_count=$(get-k8s-node-routes-count)
if [[ "${k8s_node_routes_count}" -eq 0 ]]; then
echo "No k8s node routes found and IP alias should already be enabled. Exiting..."
exit 0
fi
echo "Found ${k8s_node_routes_count} K8s node routes. Proceeding to upgrade them to IP aliases based connectivity..."
detect-k8s-subnetwork
if [ -z "${IP_ALIAS_SUBNETWORK:-}" ]; then
echo "No k8s cluster subnetwork found. Exiting..."
exit 1
fi
echo "k8s cluster sits on subnetwork \"${IP_ALIAS_SUBNETWORK}\""
set-allow-subnet-cidr-routes-overlap true
add-k8s-subnet-secondary-ranges
echo "Changing K8s master envs and restarting..."
export KUBE_GCE_IP_ALIAS_SUBNETWORK=${IP_ALIAS_SUBNETWORK}
export KUBE_GCE_NODE_IPAM_MODE="IPAMFromCluster"
export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.2.14
export ETCD_VERSION=3.2.14
# Upgrade master with updated kube envs
${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l
delete-k8s-node-routes
set-allow-subnet-cidr-routes-overlap false
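# A hypothetical invocation, assuming a live GCE cluster brought up with
# kube-up.sh; the project and zone values are placeholders:
#
#   export KUBERNETES_PROVIDER=gce
#   export PROJECT=my-gcp-project
#   export ZONE=us-central1-b
#   ./cluster/gce/upgrade-aliases.sh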

@ -40,7 +40,7 @@ function usage() {
echo " -N: Upgrade nodes only"
echo " -P: Node upgrade prerequisites only (create a new instance template)"
echo " -c: Upgrade NODE_UPGRADE_PARALLELISM nodes in parallel (default=1) within a single instance group. The MIGs themselves are dealt serially."
echo " -o: Use os distro sepcified in KUBE_NODE_OS_DISTRIBUTION for new nodes. Options include 'debian' or 'gci'"
echo " -o: Use os distro specified in KUBE_NODE_OS_DISTRIBUTION for new nodes. Options include 'debian' or 'gci'"
echo " -l: Use local(dev) binaries. This is only supported for master upgrades."
echo ""
echo ' Version number or publication is either a proper version number'
@ -151,6 +151,7 @@ function prepare-upgrade() {
detect-project
detect-subnetworks
detect-node-names # sets INSTANCE_GROUPS
write-cluster-location
write-cluster-name
tars_from_version
}
@ -190,7 +191,6 @@ function get-node-os() {
# ZONE
#
# Vars set:
# KUBELET_TOKEN
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
@ -230,7 +230,6 @@ function setup-base-image() {
# Vars set:
# SANITIZED_VERSION
# INSTANCE_GROUPS
# KUBELET_TOKEN
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
@ -253,7 +252,6 @@ function prepare-node-upgrade() {
# Get required node env vars from exiting template.
local node_env=$(get-node-env)
KUBELET_TOKEN=$(get-env-val "${node_env}" "KUBELET_TOKEN")
KUBE_PROXY_TOKEN=$(get-env-val "${node_env}" "KUBE_PROXY_TOKEN")
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${node_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
CA_CERT_BASE64=$(get-env-val "${node_env}" "CA_CERT")

File diff suppressed because it is too large