vendor cleanup: remove unused, non-Go, and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions


@@ -1,22 +0,0 @@
package(default_visibility = ["//visibility:public"])

load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cluster/gce/addons:all-srcs",
        "//cluster/gce/gci:all-srcs",
        "//cluster/gce/manifests:all-srcs",
    ],
    tags = ["automanaged"],
)


@@ -1,16 +0,0 @@
reviewers:
- bowei
- gmarek
- jszczepkowski
- vishh
- mwielgus
- MaciekPytel
- jingax10
approvers:
- bowei
- gmarek
- jszczepkowski
- vishh
- mwielgus
- MaciekPytel
- jingax10


@@ -1,38 +0,0 @@
package(default_visibility = ["//visibility:public"])

load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

filegroup(
    name = "addon-srcs",
    srcs = glob(
        [
            "**/*.json",
            "**/*.yaml",
            "**/*.yaml.in",
        ],
        exclude = ["**/*demo*/**"],
    ),
)

pkg_tar(
    name = "addons",
    srcs = [
        ":addon-srcs",
    ],
    extension = "tar.gz",
    mode = "0644",
    strip_prefix = ".",
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
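For orientation (not part of the diff): the pkg_tar rule above bundled every addon manifest into a release tarball. A hedged invocation, valid only while these targets still existed in the tree:

bazel build //cluster/gce/addons:addons
# -> bazel-bin/cluster/gce/addons/addons.tar.gz, with 0644 entries per the mode attribute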


@@ -1,7 +0,0 @@
# GCE Cluster addons
These cluster add-ons are specific to GCE and GKE clusters. The GCE-specific addon directory is
merged with the general cluster addon directory at release, so addon paths (relative to the addon
directory) must be unique across the two directory structures.
More details on addons in general can be found [here](../../addons/README.md).


@@ -1,10 +0,0 @@
apiVersion: "v1"
kind: "LimitRange"
metadata:
  name: "limits"
  namespace: default
spec:
  limits:
  - type: "Container"
    defaultRequest:
      cpu: "100m"
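For context (not part of the diff): a LimitRange defaultRequest injects a CPU request into containers that omit one. A hypothetical check, with made-up pod and file names:

kubectl apply -f limit-range.yaml
kubectl run test --image=nginx --restart=Never
kubectl get pod test -o jsonpath='{.spec.containers[0].resources.requests.cpu}'   # prints: 100m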


@@ -1,30 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: cloud-provider
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cloud-provider
subjects:
- kind: ServiceAccount
  name: cloud-provider
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: cloud-provider
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cloud-provider
subjects:
- kind: ServiceAccount
  name: cloud-provider
  namespace: kube-system


@@ -1,35 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: cloud-provider
  namespace: kube-system
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - get
  - patch
  - update
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: cloud-provider
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
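A hedged way to confirm what these rules grant (the subject name comes from the manifests above; output depends on the live cluster):

kubectl auth can-i create configmaps -n kube-system --as=system:serviceaccount:kube-system:cloud-provider   # yes, via the Role
kubectl auth can-i create events --all-namespaces --as=system:serviceaccount:kube-system:cloud-provider     # yes, via the ClusterRole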


@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:kube-proxy
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system


@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:unprivileged-addon
subjects:
- kind: Group
  # All service accounts in the kube-system namespace are allowed to use this.
  name: system:serviceaccounts:kube-system
  apiGroup: rbac.authorization.k8s.io


@@ -1,24 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:nodes
  namespace: kube-system
  annotations:
    kubernetes.io/description: 'Allow nodes to create privileged pods. Should
      be used in combination with the NodeRestriction admission plugin to limit
      nodes to mirror pods bound to themselves.'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: 'true'
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
- kind: User
  apiGroup: rbac.authorization.k8s.io
  # Legacy node ID
  name: kubelet


@@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRoleBinding.
kind: ClusterRoleBinding
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:persistent-volume-binder
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system


@@ -1,20 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRole.
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  namespace: default
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.persistent-volume-binder
  resources:
  - podsecuritypolicies
  verbs:
  - use


@@ -1,29 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.persistent-volume-binder
  annotations:
    kubernetes.io/description: 'Policy used by the persistent-volume-binder
      (a.k.a. persistentvolume-controller) to run recycler pods.'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  volumes:
  - 'nfs'
  - 'secret'  # Required for service account credentials.
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false


@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use
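PodSecurityPolicy authorization hinges on the use verb granted above: a pod is admitted under gce.privileged only if its service account or creating user is bound to this ClusterRole, as the RoleBindings earlier in this commit do. A hypothetical check (the subject is illustrative):

kubectl auth can-i use podsecuritypolicy/gce.privileged --as=system:serviceaccount:kube-system:kube-proxy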


@@ -1,33 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false


@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.unprivileged-addon
  resources:
  - podsecuritypolicies
  verbs:
  - use


@@ -1,38 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.unprivileged-addon
  annotations:
    kubernetes.io/description: 'This policy grants the minimum amount of
      privilege necessary to run non-privileged kube-system pods. This policy is
      not intended for use outside of kube-system, and may include further
      restrictions in the future.'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'emptyDir'
  - 'configMap'
  - 'secret'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  # TODO: The addons using this profile should not run as root.
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false


@@ -1,116 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Vars assumed:
# NUM_NODES
function get-master-size {
  local suggested_master_size=1
  if [[ "${NUM_NODES}" -gt "5" ]]; then
    suggested_master_size=2
  fi
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    suggested_master_size=4
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    suggested_master_size=8
  fi
  if [[ "${NUM_NODES}" -gt "250" ]]; then
    suggested_master_size=16
  fi
  if [[ "${NUM_NODES}" -gt "500" ]]; then
    suggested_master_size=32
  fi
  if [[ "${NUM_NODES}" -gt "3000" ]]; then
    suggested_master_size=64
  fi
  echo "${suggested_master_size}"
}
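# Worked example (illustrative): NUM_NODES=150 passes the >5, >10 and >100
# checks but not >250, so the function echoes 8 and the caller ends up with
# an n1-standard-8 master.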
# Vars assumed:
# NUM_NODES
function get-master-root-disk-size() {
  local suggested_master_root_disk_size="20GB"
  if [[ "${NUM_NODES}" -gt "1000" ]]; then
    suggested_master_root_disk_size="50GB"
  fi
  if [[ "${NUM_NODES}" -gt "2000" ]]; then
    suggested_master_root_disk_size="100GB"
  fi
  echo "${suggested_master_root_disk_size}"
}
# Vars assumed:
# NUM_NODES
function get-master-disk-size() {
  local suggested_master_disk_size="20GB"
  if [[ "${NUM_NODES}" -gt "1000" ]]; then
    suggested_master_disk_size="100GB"
  fi
  if [[ "${NUM_NODES}" -gt "2000" ]]; then
    suggested_master_disk_size="200GB"
  fi
  echo "${suggested_master_disk_size}"
}
function get-node-ip-range {
  if [[ -n "${NODE_IP_RANGE:-}" ]]; then
    >&2 echo "Using user provided NODE_IP_RANGE: ${NODE_IP_RANGE}"
    echo "${NODE_IP_RANGE}"
    return
  fi
  local suggested_range="10.40.0.0/22"
  if [[ "${NUM_NODES}" -gt 1000 ]]; then
    suggested_range="10.40.0.0/21"
  fi
  if [[ "${NUM_NODES}" -gt 2000 ]]; then
    suggested_range="10.40.0.0/20"
  fi
  if [[ "${NUM_NODES}" -gt 4000 ]]; then
    suggested_range="10.40.0.0/19"
  fi
  echo "${suggested_range}"
}
function get-cluster-ip-range {
  local suggested_range="10.64.0.0/14"
  if [[ "${NUM_NODES}" -gt 1000 ]]; then
    suggested_range="10.64.0.0/13"
  fi
  if [[ "${NUM_NODES}" -gt 2000 ]]; then
    suggested_range="10.64.0.0/12"
  fi
  if [[ "${NUM_NODES}" -gt 4000 ]]; then
    suggested_range="10.64.0.0/11"
  fi
  echo "${suggested_range}"
}
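# Worked example (illustrative): NUM_NODES=3000 yields 10.40.0.0/20 from
# get-node-ip-range and 10.64.0.0/12 from get-cluster-ip-range above.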
# Calculate the IP alias range size based on the maximum number of pods.
# Let pow be the smallest integer that is greater than or equal to log2($1 * 2).
# (32 - pow) will be returned.
#
# $1: The maximum number of pods per node.
function get-alias-range-size() {
  for pow in {0..31}; do
    if (( 1 << $pow >= $1 * 2 )); then
      echo $((32 - pow))
      return 0
    fi
  done
}
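# Worked example (illustrative): for the default MAX_PODS_PER_NODE=110,
# 110*2=220 and the smallest pow with 2^pow >= 220 is 8 (2^8=256), so the
# function prints 32-8=24, i.e. each node receives a /24 alias range.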
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"


@@ -1,455 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint has to point to the v1 API. For example, https://www.googleapis.com/compute/staging_v1/
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices.
# The format is "#,scsi/nvme,block/fs"; multiple configurations are separated by
# semicolons, e.g. "2,scsi,fs;1,nvme,block" requests 2 SCSI-formatted and mounted
# SSDs plus 1 NVMe block-device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
# More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
  MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
  NODE_OS_DISTRIBUTION="gci"
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
  NODE_ACCELERATORS=""
fi
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
# MASTER_EXTRA_METADATA is the extra instance metadata on the master instance, separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instances, separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# KUBELET_TEST_ARGS are extra arguments passed to kubelet.
KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using 'default' network.
if [[ "${NETWORK}" == "default" ]]; then
  KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
else
  KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
fi
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
  SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy an L7 load balancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}"
# Optional: Enable deploying separate prometheus stack for monitoring kubernetes cluster
ENABLE_PROMETHEUS_MONITORING="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}"
# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to
# disable it in case of stability issues.
# TODO(piosz): remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to setup as part of the cluster bring up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# Metadata agent is a daemon set that provides metadata of kubernetes objects
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# If specified, one special node out of NUM_NODES will be created with this
# machine type. Useful for scheduling heapster in large clusters whose nodes are small.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Historically fluentd was a manifest pod and was then migrated to a DaemonSet.
# To avoid a situation during cluster upgrade where two instances of fluentd
# run on a node, the kubelet needs to mark nodes on which fluentd does not run
# as a manifest pod with the appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
  # Put the necessary label on the node so the daemonset gets scheduled.
  NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
  # Add to the provider custom variables.
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Enable AESGCM encryption of secrets by default.
ENCRYPTION_PROVIDER_CONFIG="${ENCRYPTION_PROVIDER_CONFIG:-}"
if [[ -z "${ENCRYPTION_PROVIDER_CONFIG}" ]]; then
  ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n'
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
    - secrets
    providers:
    - aesgcm:
        keys:
        - name: key1
          secret: $(dd if=/dev/random bs=32 count=1 status=none | base64 | tr -d '\r\n')
EOM
)
fi
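# Example (illustrative): the generated value can be inspected with
#   echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode
# which prints the EncryptionConfig YAML above with a freshly generated key1.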
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
  EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
  RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
fi
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
  FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
  if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
    NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
  fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as a DaemonSet.
# standalone - Run node problem detector as a standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
  # Enable standalone mode by default for gci.
  ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
  ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
  AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
  AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
  AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
  # Number of Pods that can run on this node.
  MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
  # Size of ranges allocated to each node.
  IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
  IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
  # If we're using a custom network, use the subnet we already created for it as the ip-alias subnet.
  # Note that this means SUBNETWORK overrides KUBE_GCE_IP_ALIAS_SUBNETWORK for custom networks.
  if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
    IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
  fi
  # Reserve the services IP space so it is not allocated to other GCP resources.
  SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
  NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
  SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
  # Add to the provider custom variables.
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
  PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
  PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
  # MAX_PODS_PER_NODE must not be set for route-based clusters.
  echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
  exit 1
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
  ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook"
# ResourceQuota must come last, or a creation is recorded but the pod may be forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
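# Resulting plugin order (illustrative, with ENABLE_POD_SECURITY_POLICY unset):
#   Initializers,...,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota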
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Storage backend. 'etcd2' supported, 'etcd3' experimental.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
  echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-false}"
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
# Optional: enable pod priority
ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}"
if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then
  FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
  FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
  SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
  SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi


@@ -1,474 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint has to point to the v1 API. For example, https://www.googleapis.com/compute/staging_v1/
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices.
# The format is "#,scsi/nvme,block/fs"; multiple configurations are separated by
# semicolons, e.g. "2,scsi,fs;1,nvme,block" requests 2 SCSI-formatted and mounted
# SSDs plus 1 NVMe block-device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
  MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
  NODE_OS_DISTRIBUTION="gci"
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
  NODE_ACCELERATORS=""
fi
# To avoid failing large tests due to some flakes in starting nodes, allow
# for a small percentage of nodes to not start during cluster startup.
ALLOWED_NOTREADY_NODES="${ALLOWED_NOTREADY_NODES:-$((NUM_NODES / 100))}"
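# Worked example (illustrative): with NUM_NODES=500, $((NUM_NODES / 100)) = 5,
# so up to 5 nodes may be NotReady at startup without failing the test cluster.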
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
# MASTER_EXTRA_METADATA is the extra instance metadata on the master instance, separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instances, separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
NETWORK=${KUBE_GCE_NETWORK:-e2e-test-${USER}}
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
  SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
  RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
fi
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
# Enable the docker debug mode.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --debug"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy an L7 load balancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}"
# Optional: Enable deploying separate prometheus stack for monitoring kubernetes cluster
ENABLE_PROMETHEUS_MONITORING="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}"
# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to
# disable it in case of stability issues.
# TODO(piosz): remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to setup as part of the cluster bring up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# Metadata agent is a daemon set that provides metadata of kubernetes objects
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# If specified, one special node out of NUM_NODES will be created with this
# machine type. Useful for scheduling heapster in large clusters whose nodes are small.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.18-0) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
ETCD_VERSION="${TEST_ETCD_VERSION:-}"
# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
DOCKER_TEST_LOG_LEVEL="${DOCKER_TEST_LOG_LEVEL:---log-level=info}"
API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}"
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
  NODE_KUBELET_TEST_ARGS="${NODE_KUBELET_TEST_ARGS:-} --experimental-kernel-memcg-notification=true"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
  MASTER_KUBELET_TEST_ARGS="${MASTER_KUBELET_TEST_ARGS:-} --experimental-kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --vmodule=httplog=3 --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# Historically fluentd was a manifest pod and was then migrated to a DaemonSet.
# To avoid a situation during cluster upgrade where two instances of fluentd
# run on a node, the kubelet needs to mark nodes on which fluentd does not run
# as a manifest pod with the appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
fi
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-true}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
  # Put the necessary label on the node so the daemonset gets scheduled.
  NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
  # Add to the provider custom variables.
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
  EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
  FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
  if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
    NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
  fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as a DaemonSet.
# standalone - Run node problem detector as a standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
  # Enable standalone mode by default for gci.
  ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
  ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
  AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
  AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-false}"
  AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
  # Number of Pods that can run on this node.
  MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
  # Size of ranges allocated to each node.
  IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
  IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
  # If we're using a custom network, use the subnet we already created for it as the ip-alias subnet.
  # Note that this means SUBNETWORK overrides KUBE_GCE_IP_ALIAS_SUBNETWORK for custom networks.
  if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
    IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
  fi
  # Reserve the services IP space so it is not allocated to other GCP resources.
  SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
  NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
  SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
  # Add to the provider custom variables.
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
  PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
  PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
  # MAX_PODS_PER_NODE must not be set for route-based clusters.
  echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
  exit 1
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
  ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
    ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
  fi
  # ResourceQuota must come last, or a creation is recorded, but the pod may be forbidden.
  ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
else
  ADMISSION_CONTROL=${KUBE_ADMISSION_CONTROL}
fi
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Optional: setting it to true denotes this is a testing cluster,
# so that we can use pulled kubernetes binaries, even if binaries
# are pre-installed in the image. Note that currently this logic
# is only supported in trusty or GCI.
TEST_CLUSTER="${TEST_CLUSTER:-true}"
# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, an image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
# Disabling this by default in tests ensures default RBAC policies are sufficient from 1.6+
# Upgrade test jobs that go from a version < 1.6 to a version >= 1.6 should override this to be true.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
ADVANCED_AUDIT_LOG_MODE="${ADVANCED_AUDIT_LOG_MODE:-batch}" # batch, blocking
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
  echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-true}"
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: Change the kube-proxy implementation. Choices are [iptables, ipvs].
KUBE_PROXY_MODE="${KUBE_PROXY_MODE:-iptables}"
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
# Optional: enable pod priority
ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}"
if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then
  FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
  FEATURE_GATES="${FEATURE_GATES},TokenRequest=true"
  SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
  SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi


@@ -1,35 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A utility for deleting target pools and forwarding rules that are unattached to VMs
PROJECT=${PROJECT:-kubernetes-jenkins}
REGION=${REGION:-us-central1}
LIST=$(gcloud --project=${PROJECT} compute target-pools list --format='value(name)')
result=0
for x in ${LIST}; do
  if ! gcloud compute --project=${PROJECT} target-pools get-health "${x}" --region=${REGION} 2>/dev/null >/dev/null; then
    echo DELETING "${x}"
    gcloud compute --project=${PROJECT} firewall-rules delete "k8s-fw-${x}" -q
    gcloud compute --project=${PROJECT} forwarding-rules delete "${x}" --region=${REGION} -q
    gcloud compute --project=${PROJECT} addresses delete "${x}" --region=${REGION} -q
    gcloud compute --project=${PROJECT} target-pools delete "${x}" --region=${REGION} -q
    result=1
  fi
done
exit ${result}

View File

@ -1,71 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
go_test(
name = "go_default_test",
srcs = [
"apiserver_manifest_test.go",
"configure_helper_test.go",
],
data = [
":scripts-test-data",
"//cluster/gce/manifests",
],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)
# Having the COS code from the GCE cluster deploy hosted with the release is
# useful for GKE. This list should match the list in
# kubernetes/release/lib/releaselib.sh.
release_filegroup(
name = "gcs-release-artifacts",
srcs = [
"configure.sh",
"master.yaml",
"node.yaml",
"shutdown.sh",
],
visibility = ["//visibility:public"],
)
pkg_tar(
name = "gci-trusty-manifests",
srcs = glob(["gke-internal-configure-helper.sh"]),
files = {
"//cluster/gce/gci/mounter": "gci-mounter",
"configure-helper.sh": "gci-configure-helper.sh",
"health-monitor.sh": "health-monitor.sh",
},
mode = "0755",
strip_prefix = ".",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cluster/gce/gci/mounter:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
filegroup(
name = "scripts-test-data",
srcs = [
"configure-helper.sh",
],
)

View File

@ -1,11 +0,0 @@
# Container-VM Image
[Container-VM Image](https://cloud.google.com/compute/docs/containers/vm-image/)
is a container-optimized OS image for the Google Cloud Platform (GCP). It is
primarily for running Google services on GCP. Unlike the open preview version
of container-vm, the new Container-VM Image is based on the open source
ChromiumOS project, allowing us greater control over the build management,
security compliance, and customizations for GCP.

View File

@ -1,212 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gci
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"k8s.io/api/core/v1"
)
const (
/*
Template for defining the environment state of configure-helper.sh
The environment of configure-helper.sh is initially configured via the kube-env file. However, as deploy-helper
executes, new variables are created. ManifestTestCase does not care where a variable came from, though future
test scenarios may require such a distinction.
The list of variables is by no means complete; it contains only what is required to run the currently defined tests.
*/
deployHelperEnv = `
readonly KUBE_HOME={{.KubeHome}}
readonly KUBE_API_SERVER_LOG_PATH=${KUBE_HOME}/kube-apiserver.log
readonly KUBE_API_SERVER_AUDIT_LOG_PATH=${KUBE_HOME}/kube-apiserver-audit.log
readonly CLOUD_CONFIG_OPT=--cloud-config=/etc/gce.conf
readonly CA_CERT_BUNDLE_PATH=/foo/bar
readonly APISERVER_SERVER_CERT_PATH=/foo/bar
readonly APISERVER_SERVER_KEY_PATH=/foo/bar
readonly APISERVER_CLIENT_CERT_PATH=/foo/bar
readonly CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
readonly CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
readonly KUBE_API_SERVER_DOCKER_TAG=v1.11.0-alpha.0.1808_3c7452dc11645d-dirty
readonly LOG_OWNER_USER=$(whoami)
readonly LOG_OWNER_GROUP=$(id -gn)
ENCRYPTION_PROVIDER_CONFIG={{.EncryptionProviderConfig}}
ENCRYPTION_PROVIDER_CONFIG_PATH={{.EncryptionProviderConfigPath}}
readonly ETCD_KMS_KEY_ID={{.ETCDKMSKeyID}}
`
kubeAPIServerManifestFileName = "kube-apiserver.manifest"
kmsPluginManifestFileName = "kms-plugin-container.manifest"
kubeAPIServerStartFuncName = "start-kube-apiserver"
// Position of containers within a pod manifest
kmsPluginContainerIndex = 0
apiServerContainerIndexNoKMS = 0
apiServerContainerIndexWithKMS = 1
// command": [
// "/bin/sh", - Index 0
// "-c", - Index 1
// "exec /usr/local/bin/kube-apiserver " - Index 2
execArgsIndex = 2
socketVolumeMountIndexKMSPlugin = 1
socketVolumeMountIndexAPIServer = 0
)
type kubeAPIServerEnv struct {
KubeHome string
EncryptionProviderConfig string
EncryptionProviderConfigPath string
ETCDKMSKeyID string
}
type kubeAPIServerManifestTestCase struct {
*ManifestTestCase
apiServerContainer v1.Container
kmsPluginContainer v1.Container
}
func newKubeAPIServerManifestTestCase(t *testing.T) *kubeAPIServerManifestTestCase {
return &kubeAPIServerManifestTestCase{
ManifestTestCase: newManifestTestCase(t, kubeAPIServerManifestFileName, kubeAPIServerStartFuncName, []string{kmsPluginManifestFileName}),
}
}
func (c *kubeAPIServerManifestTestCase) mustLoadContainers() {
c.mustLoadPodFromManifest()
switch len(c.pod.Spec.Containers) {
case 1:
c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexNoKMS]
case 2:
c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexWithKMS]
c.kmsPluginContainer = c.pod.Spec.Containers[kmsPluginContainerIndex]
default:
c.t.Fatalf("got %d containers in apiserver pod, want 1 or 2", len(c.pod.Spec.Containers))
}
}
func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv) {
c.mustInvokeFunc(deployHelperEnv, e)
c.mustLoadContainers()
}
func getEncryptionProviderConfigFlag(path string) string {
return fmt.Sprintf("--experimental-encryption-provider-config=%s", path)
}
func TestEncryptionProviderFlag(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("FOO")),
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
}
c.invokeTest(e)
expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
execArgs := c.apiServerContainer.Command[execArgsIndex]
if !strings.Contains(execArgs, expectedFlag) {
c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
}
}
func TestEncryptionProviderConfig(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("FOO")),
EncryptionProviderConfigPath: p,
}
c.mustInvokeFunc(deployHelperEnv, e)
if _, err := os.Stat(p); err != nil {
c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
}
}
// TestKMSEncryptionProviderConfig asserts that if ETCD_KMS_KEY_ID is set then start-kube-apiserver will produce
// EncryptionProviderConfig file of type KMS and inject experimental-encryption-provider-config startup flag.
func TestKMSEncryptionProviderConfig(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
ETCDKMSKeyID: "FOO",
}
c.invokeTest(e)
expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
execArgs := c.apiServerContainer.Command[execArgsIndex]
if !strings.Contains(execArgs, expectedFlag) {
c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
}
p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
if _, err := os.Stat(p); err != nil {
c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
}
d, err := ioutil.ReadFile(p)
if err != nil {
c.t.Fatalf("Failed to read encryption provider config %s", p)
}
if !strings.Contains(string(d), "name: grpc-kms-provider") {
c.t.Fatalf("Got %s\n, wanted encryption provider config to be of type grpc-kms", string(d))
}
}
func TestKMSPluginAndAPIServerSharedVolume(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
var e = kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
ETCDKMSKeyID: "FOO",
}
c.invokeTest(e)
k := c.kmsPluginContainer.VolumeMounts[socketVolumeMountIndexKMSPlugin].MountPath
a := c.apiServerContainer.VolumeMounts[socketVolumeMountIndexAPIServer].MountPath
if k != a {
t.Fatalf("Got %s!=%s, wanted KMSPlugin VolumeMount #1:%s to be equal to kube-apiserver VolumeMount #0:%s",
k, a, k, a)
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,455 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Due to the GCE custom metadata size limit, we split the entire script into two
# files configure.sh and configure-helper.sh. The functionality of downloading
# kubernetes configuration, manifests, docker images, and binary files are
# put in configure.sh, which is uploaded via GCE custom metadata.
set -o errexit
set -o nounset
set -o pipefail
### Hardcoded constants
DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.5.0"
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
DEFAULT_CRICTL_VERSION="v1.11.0"
DEFAULT_CRICTL_SHA1="8f5142b985d314cdebb51afd55054d5ec00c442a"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function set-broken-motd {
cat > /etc/motd <<EOF
Broken (or in progress) Kubernetes node setup! Check the cluster initialization status
using the following commands.
Master instance:
- sudo systemctl status kube-master-installation
- sudo systemctl status kube-master-configuration
Node instance:
- sudo systemctl status kube-node-installation
- sudo systemctl status kube-node-configuration
EOF
}
function download-kube-env {
# Fetch kube-env from GCE metadata server.
(
umask 077
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env")
rm -f "${tmp_kube_env}"
)
}
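# A rough sketch of the YAML-to-shell conversion above (hypothetical kube-env entries):
#   KUBERNETES_MASTER: 'true'       becomes   readonly KUBERNETES_MASTER=true
#   EXTRA_DOCKER_OPTS: '--foo bar'  becomes   readonly EXTRA_DOCKER_OPTS='--foo bar'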
function download-kubelet-config {
local -r dest="$1"
echo "Downloading Kubelet config file, if it exists"
# Fetch kubelet config file from GCE metadata server.
(
umask 077
local -r tmp_kubelet_config="/tmp/kubelet-config.yaml"
if curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kubelet_config}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kubelet-config; then
# only write to the final location if curl succeeds
mv "${tmp_kubelet_config}" "${dest}"
elif [[ "${REQUIRE_METADATA_KUBELET_CONFIG_FILE:-false}" == "true" ]]; then
echo "== Failed to download required Kubelet config file from metadata server =="
exit 1
fi
)
}
function download-kube-master-certs {
# Fetch kube-env from GCE metadata server.
(
umask 077
local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_master_certs}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs")
rm -f "${tmp_kube_master_certs}"
)
}
function validate-hash {
local -r file="$1"
local -r expected="$2"
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
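# Example (hypothetical file and digest):
#   validate-hash "${KUBE_HOME}/foo.tar.gz" "0123456789abcdef0123456789abcdef01234567" || exit 1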
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
function download-or-bust {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
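# Example (hypothetical hash and mirror URLs): each URL is tried in turn until one
# downloads successfully and, because a hash is given, also passes validation.
#   download-or-bust "0123456789abcdef0123456789abcdef01234567" \
#     "https://example.com/mirror-a/npd.tar.gz" "https://example.com/mirror-b/npd.tar.gz"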
function is-preloaded {
local -r key=$1
local -r value=$2
grep -qs "${key},${value}" "${KUBE_HOME}/preload_info"
}
function split-commas {
echo $1 | tr "," "\n"
}
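# e.g. split-commas "a,b,c" prints a, b, and c on separate lines.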
function remount-flexvolume-directory {
local -r flexvolume_plugin_dir=$1
mkdir -p $flexvolume_plugin_dir
mount --bind $flexvolume_plugin_dir $flexvolume_plugin_dir
mount -o remount,exec $flexvolume_plugin_dir
}
function install-gci-mounter-tools {
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
local -r mounter_tar_sha="${DEFAULT_MOUNTER_TAR_SHA}"
if is-preloaded "mounter" "${mounter_tar_sha}"; then
echo "mounter is preloaded."
return
fi
echo "Downloading gci mounter tools."
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}"
chmod a+x "${CONTAINERIZED_MOUNTER_HOME}"
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs"
download-or-bust "${mounter_tar_sha}" "https://storage.googleapis.com/kubernetes-release/gci-mounter/mounter.tar"
cp "${KUBE_HOME}/kubernetes/server/bin/mounter" "${CONTAINERIZED_MOUNTER_HOME}/mounter"
chmod a+x "${CONTAINERIZED_MOUNTER_HOME}/mounter"
mv "${KUBE_HOME}/mounter.tar" /tmp/mounter.tar
tar xf /tmp/mounter.tar -C "${CONTAINERIZED_MOUNTER_HOME}/rootfs"
rm /tmp/mounter.tar
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs/var/lib/kubelet"
}
# Install node problem detector binary.
function install-node-problem-detector {
if [[ -n "${NODE_PROBLEM_DETECTOR_VERSION:-}" ]]; then
local -r npd_version="${NODE_PROBLEM_DETECTOR_VERSION}"
local -r npd_sha1="${NODE_PROBLEM_DETECTOR_TAR_HASH}"
else
local -r npd_version="${DEFAULT_NPD_VERSION}"
local -r npd_sha1="${DEFAULT_NPD_SHA1}"
fi
local -r npd_tar="node-problem-detector-${npd_version}.tar.gz"
if is-preloaded "${npd_tar}" "${npd_sha1}"; then
echo "node-problem-detector is preloaded."
return
fi
echo "Downloading node problem detector."
local -r npd_release_path="https://storage.googleapis.com/kubernetes-release"
download-or-bust "${npd_sha1}" "${npd_release_path}/node-problem-detector/${npd_tar}"
local -r npd_dir="${KUBE_HOME}/node-problem-detector"
mkdir -p "${npd_dir}"
tar xzf "${KUBE_HOME}/${npd_tar}" -C "${npd_dir}" --overwrite
mv "${npd_dir}/bin"/* "${KUBE_BIN}"
chmod a+x "${KUBE_BIN}/node-problem-detector"
rmdir "${npd_dir}/bin"
rm -f "${KUBE_HOME}/${npd_tar}"
}
function install-cni-binaries {
local -r cni_tar="cni-plugins-amd64-${DEFAULT_CNI_VERSION}.tgz"
local -r cni_sha1="${DEFAULT_CNI_SHA1}"
if is-preloaded "${cni_tar}" "${cni_sha1}"; then
echo "${cni_tar} is preloaded."
return
fi
echo "Downloading cni binaries"
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
local -r cni_dir="${KUBE_HOME}/cni"
mkdir -p "${cni_dir}/bin"
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite
mv "${cni_dir}/bin"/* "${KUBE_BIN}"
rmdir "${cni_dir}/bin"
rm -f "${KUBE_HOME}/${cni_tar}"
}
# Install crictl binary.
function install-crictl {
if [[ -n "${CRICTL_VERSION:-}" ]]; then
local -r crictl_version="${CRICTL_VERSION}"
local -r crictl_sha1="${CRICTL_TAR_HASH}"
else
local -r crictl_version="${DEFAULT_CRICTL_VERSION}"
local -r crictl_sha1="${DEFAULT_CRICTL_SHA1}"
fi
local -r crictl="crictl-${crictl_version}-linux-amd64"
if is-preloaded "${crictl}" "${crictl_sha1}"; then
echo "crictl is preloaded"
return
fi
echo "Downloading crictl"
local -r crictl_path="https://storage.googleapis.com/kubernetes-release/crictl"
download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
chmod a+x "${KUBE_BIN}/crictl"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
}
function install-exec-auth-plugin {
if [[ ! "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then
return
fi
local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}"
local -r plugin_sha1="${EXEC_AUTH_PLUGIN_SHA1}"
echo "Downloading gke-exec-auth-plugin binary"
download-or-bust "${plugin_sha1}" "${plugin_url}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
}
function install-kube-manifests {
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
local dst_dir="${KUBE_HOME}/kube-manifests"
mkdir -p "${dst_dir}"
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
if is-preloaded "${manifests_tar}" "${manifests_tar_hash}"; then
echo "${manifests_tar} is preloaded."
return
fi
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}"
if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" ]]; then
cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" "${KUBE_BIN}/"
fi
cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"
rm -f "${KUBE_HOME}/${manifests_tar}"
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
local -r img=$1
echo "Try to load docker image file ${img}"
# Temporarily turn off errexit, because we don't want to exit on first failure.
set +e
local -r max_attempts=5
local -i attempt_num=1
until timeout 30 ${LOAD_IMAGE_COMMAND:-docker load -i} "${img}"; do
if [[ "${attempt_num}" == "${max_attempts}" ]]; then
echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exit!!"
exit 1
else
attempt_num=$((attempt_num+1))
sleep 5
fi
done
# Re-enable errexit.
set -e
}
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
function load-docker-images {
echo "Start loading kube-system docker images"
local -r img_dir="${KUBE_HOME}/kube-docker-files"
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
try-load-docker-image "${img_dir}/kube-apiserver.tar"
try-load-docker-image "${img_dir}/kube-controller-manager.tar"
try-load-docker-image "${img_dir}/kube-scheduler.tar"
else
try-load-docker-image "${img_dir}/kube-proxy.tar"
fi
}
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
# and places them into suitable directories. Files are placed in /home/kubernetes.
function install-kube-binary-config {
cd "${KUBE_HOME}"
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
if is-preloaded "${server_binary_tar}" "${server_binary_tar_hash}"; then
echo "${server_binary_tar} is preloaded."
else
echo "Downloading binary release tar"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
local -r src_dir="${KUBE_HOME}/kubernetes/server/bin"
local dst_dir="${KUBE_HOME}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
fi
load-docker-images
mv "${src_dir}/kubelet" "${KUBE_BIN}"
mv "${src_dir}/kubectl" "${KUBE_BIN}"
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
fi
if [[ "${KUBERNETES_MASTER:-}" == "false" ]] && \
[[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
install-node-problem-detector
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
install-cni-binaries
fi
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
install-kube-manifests
chmod -R 755 "${KUBE_BIN}"
# Install gci mounter related artifacts to allow mounting storage volumes in GCI
install-gci-mounter-tools
# Remount the Flexvolume directory with the "exec" option, if needed.
if [[ "${REMOUNT_VOLUME_PLUGIN_DIR:-}" == "true" && -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
remount-flexvolume-directory "${VOLUME_PLUGIN_DIR}"
fi
# Install crictl on each node.
install-crictl
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
install-exec-auth-plugin
fi
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
# if install fails, message-of-the-day (motd) will warn at login shell
set-broken-motd
KUBE_HOME="/home/kubernetes"
KUBE_BIN="${KUBE_HOME}/bin"
# download and source kube-env
download-kube-env
source "${KUBE_HOME}/kube-env"
download-kubelet-config "${KUBE_HOME}/kubelet-config.yaml"
# master certs
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
download-kube-master-certs
fi
# binaries and kube-system manifests
install-kube-binary-config
echo "Done for installing kubernetes files"

View File

@ -1,172 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gci
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"text/template"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
const (
envScriptFileName = "kube-env"
configureHelperScriptName = "configure-helper.sh"
)
type ManifestTestCase struct {
pod v1.Pod
envScriptPath string
manifest string
auxManifests []string
kubeHome string
manifestSources string
manifestDestination string
manifestTemplateDir string
manifestTemplate string
manifestFuncName string
t *testing.T
}
func newManifestTestCase(t *testing.T, manifest, funcName string, auxManifests []string) *ManifestTestCase {
c := &ManifestTestCase{
t: t,
manifest: manifest,
auxManifests: auxManifests,
manifestFuncName: funcName,
}
d, err := ioutil.TempDir("", "configure-helper-test")
if err != nil {
c.t.Fatalf("Failed to create temp directory: %v", err)
}
c.kubeHome = d
c.envScriptPath = filepath.Join(c.kubeHome, envScriptFileName)
c.manifestSources = filepath.Join(c.kubeHome, "kube-manifests", "kubernetes", "gci-trusty")
currentPath, err := os.Getwd()
if err != nil {
c.t.Fatalf("Failed to get current directory: %v", err)
}
gceDir := filepath.Dir(currentPath)
c.manifestTemplateDir = filepath.Join(gceDir, "manifests")
c.manifestTemplate = filepath.Join(c.manifestTemplateDir, c.manifest)
c.manifestDestination = filepath.Join(c.kubeHome, "etc", "kubernetes", "manifests", c.manifest)
c.mustCopyFromTemplate()
c.mustCopyAuxFromTemplate()
c.mustCreateManifestDstDir()
return c
}
func (c *ManifestTestCase) mustCopyFromTemplate() {
if err := os.MkdirAll(c.manifestSources, os.ModePerm); err != nil {
c.t.Fatalf("Failed to create source directory: %v", err)
}
if err := copyFile(c.manifestTemplate, filepath.Join(c.manifestSources, c.manifest)); err != nil {
c.t.Fatalf("Failed to copy source manifest to KUBE_HOME: %v", err)
}
}
func (c *ManifestTestCase) mustCopyAuxFromTemplate() {
for _, m := range c.auxManifests {
err := copyFile(filepath.Join(c.manifestTemplateDir, m), filepath.Join(c.manifestSources, m))
if err != nil {
c.t.Fatalf("Failed to copy source manifest %s to KUBE_HOME: %v", m, err)
}
}
}
func (c *ManifestTestCase) mustCreateManifestDstDir() {
p := filepath.Join(c.kubeHome, "etc", "kubernetes", "manifests")
if err := os.MkdirAll(p, os.ModePerm); err != nil {
c.t.Fatalf("Failed to create destination folder for kube-apiserver.manifest: %v", err)
}
}
func (c *ManifestTestCase) mustCreateEnv(envTemplate string, env interface{}) {
f, err := os.Create(filepath.Join(c.kubeHome, envScriptFileName))
if err != nil {
c.t.Fatalf("Failed to create envScript: %v", err)
}
defer f.Close()
t := template.Must(template.New("env").Parse(envTemplate))
if err = t.Execute(f, env); err != nil {
c.t.Fatalf("Failed to execute template: %v", err)
}
}
func (c *ManifestTestCase) mustInvokeFunc(envTemplate string, env interface{}) {
c.mustCreateEnv(envTemplate, env)
args := fmt.Sprintf("source %s ; source %s --source-only ; %s", c.envScriptPath, configureHelperScriptName, c.manifestFuncName)
cmd := exec.Command("bash", "-c", args)
bs, err := cmd.CombinedOutput()
if err != nil {
c.t.Logf("%s", bs)
c.t.Fatalf("Failed to run configure-helper.sh: %v", err)
}
c.t.Logf("%s", string(bs))
}
func (c *ManifestTestCase) mustLoadPodFromManifest() {
json, err := ioutil.ReadFile(c.manifestDestination)
if err != nil {
c.t.Fatalf("Failed to read manifest: %s, %v", c.manifestDestination, err)
}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &c.pod); err != nil {
c.t.Fatalf("Failed to decode manifest: %v", err)
}
}
func (c *ManifestTestCase) tearDown() {
os.RemoveAll(c.kubeHome)
}
func copyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
// Report the Close error only if the copy itself succeeded.
cerr := out.Close()
if err == nil {
err = cerr
}
}()
_, err = io.Copy(out, in)
return err
}

View File

@ -1,184 +0,0 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sets up FlexVolume drivers on GCE COS instances using mounting utilities packaged in a Google
# Container Registry image.
# The user-provided FlexVolume driver(s) must be under /flexvolume of the image filesystem.
# For example, the driver k8s/nfs must be located at /flexvolume/k8s~nfs/nfs.
#
# This script should be used on a clean instance, with no FlexVolume installed.
# It should not be run on instances with an existing full or partial installation.
# Upon failure, the script will clean up the partial installation automatically.
#
# Must be executed under /home/kubernetes/bin with sudo.
# Warning: kubelet will be restarted upon successful execution.
set -o errexit
set -o nounset
set -o pipefail
MOUNTER_IMAGE=${1:-}
MOUNTER_PATH=/home/kubernetes/flexvolume_mounter
VOLUME_PLUGIN_DIR=/home/kubernetes/flexvolume
usage() {
echo "usage: $0 imagename[:tag]"
echo " imagename Name of a Container Registry image. By default the latest image is used."
echo " :tag Container Registry image tag."
exit 1
}
if [ -z "${MOUNTER_IMAGE}" ]; then
echo "ERROR: No Container Registry mounter image is specified."
echo
usage
fi
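# Example invocation (hypothetical script and image names), run from /home/kubernetes/bin:
#   sudo ./flexvolume_node_setup.sh nfs-mounter:v1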
# Unmounts a mount point lazily. If a mount point does not exist, continue silently,
# and without error.
umount_silent() {
umount -l $1 &> /dev/null || /bin/true
}
# Waits for kubelet to restart for 1 minute.
kubelet_wait() {
timeout=60
kubelet_readonly_port=10255
until [[ $timeout -eq 0 ]]; do
printf "."
if [[ $( curl -s http://localhost:${kubelet_readonly_port}/healthz ) == "ok" ]]; then
return 0
fi
sleep 1
timeout=$(( timeout-1 ))
done
# Timed out waiting for kubelet to become healthy.
return 1
}
flex_clean() {
echo
echo "An error has occurred. Cleaning up..."
echo
umount_silent ${VOLUME_PLUGIN_DIR}
rm -rf ${VOLUME_PLUGIN_DIR}
umount_silent ${MOUNTER_PATH}/var/lib/kubelet
umount_silent ${MOUNTER_PATH}
rm -rf ${MOUNTER_PATH}
if [[ -n ${IMAGE_URL:-} ]]; then
docker rmi -f ${IMAGE_URL} &> /dev/null || /bin/true
fi
if [[ -n ${MOUNTER_DEFAULT_NAME:-} ]]; then
docker rm -f ${MOUNTER_DEFAULT_NAME} &> /dev/null || /bin/true
fi
}
trap flex_clean ERR
# Generates a bash script that wraps all calls to the actual driver inside mount utilities
# in the chroot environment. Kubelet sees this script as the FlexVolume driver.
generate_chroot_wrapper() {
if [ ! -d ${MOUNTER_PATH}/flexvolume ]; then
echo "Failed to set up FlexVolume driver: cannot find directory '/flexvolume' in the mount utility image."
exit 1
fi
for driver_dir in ${MOUNTER_PATH}/flexvolume/*; do
if [ -d "$driver_dir" ]; then
filecount=$(ls -1 $driver_dir | wc -l)
if [ $filecount -gt 1 ]; then
echo "ERROR: Expected 1 file in the FlexVolume directory but found $filecount."
exit 1
fi
driver_file=$( ls $driver_dir | head -n 1 )
# driver_path points to the actual driver inside the mount utility image,
# relative to image root.
# wrapper_path is the wrapper script location, which is known to kubelet.
driver_path=flexvolume/$( basename $driver_dir )/${driver_file}
wrapper_dir=${VOLUME_PLUGIN_DIR}/$( basename $driver_dir )
wrapper_path=${wrapper_dir}/${driver_file}
mkdir -p $wrapper_dir
cat >$wrapper_path <<EOF
#!/usr/bin/env bash
chroot ${MOUNTER_PATH} ${driver_path} "\$@"
EOF
chmod 755 $wrapper_path
echo "FlexVolume driver installed at ${wrapper_path}"
fi
done
}
echo
echo "Importing mount utility image from Container Registry..."
echo
METADATA=http://metadata.google.internal/computeMetadata/v1
SVC_ACCT_ENDPOINT=$METADATA/instance/service-accounts/default
ACCESS_TOKEN=$(curl -s -H 'Metadata-Flavor: Google' $SVC_ACCT_ENDPOINT/token | cut -d'"' -f 4)
PROJECT_ID=$(curl -s -H 'Metadata-Flavor: Google' $METADATA/project/project-id)
IMAGE_URL=gcr.io/${PROJECT_ID}/${MOUNTER_IMAGE}
MOUNTER_DEFAULT_NAME=flexvolume_mounter
sudo -u ${SUDO_USER} docker login -u _token -p $ACCESS_TOKEN https://gcr.io > /dev/null
sudo -u ${SUDO_USER} docker run --name=${MOUNTER_DEFAULT_NAME} ${IMAGE_URL}
docker export ${MOUNTER_DEFAULT_NAME} > /tmp/${MOUNTER_DEFAULT_NAME}.tar
docker rm ${MOUNTER_DEFAULT_NAME} > /dev/null
docker rmi ${IMAGE_URL} > /dev/null
echo
echo "Loading mount utilities onto this instance..."
echo
mkdir -p ${MOUNTER_PATH}
tar xf /tmp/${MOUNTER_DEFAULT_NAME}.tar -C ${MOUNTER_PATH}
# Bind the kubelet directory to one under flexvolume_mounter
mkdir -p ${MOUNTER_PATH}/var/lib/kubelet
mount --rbind /var/lib/kubelet/ ${MOUNTER_PATH}/var/lib/kubelet
mount --make-rshared ${MOUNTER_PATH}/var/lib/kubelet
# Remount the flexvolume_mounter environment with /dev enabled.
mount --bind ${MOUNTER_PATH} ${MOUNTER_PATH}
mount -o remount,dev,exec ${MOUNTER_PATH}
echo
echo "Setting up FlexVolume driver..."
echo
mkdir -p ${VOLUME_PLUGIN_DIR}
mount --bind ${VOLUME_PLUGIN_DIR} ${VOLUME_PLUGIN_DIR}
mount -o remount,exec ${VOLUME_PLUGIN_DIR}
generate_chroot_wrapper
echo
echo "Restarting Kubelet..."
echo
systemctl restart kubelet.service
# Call kubelet_wait inside the condition so errexit does not abort on failure.
if kubelet_wait; then
echo
echo "FlexVolume is ready."
else
echo "ERROR: Timed out after 1 minute waiting for kubelet restart."
fi

View File

@ -1,112 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for master and node instance health monitoring, which is
# packed in kube-manifest tarball. It is executed through a systemd service
# in cluster/gce/gci/<master/node>.yaml. The env variables come from an env
# file provided by the systemd service.
set -o nounset
set -o pipefail
# We simply kill the process when there is a failure. Another systemd service will
# automatically restart the process.
function container_runtime_monitoring {
local -r max_attempts=5
local attempt=1
local -r crictl="${KUBE_HOME}/bin/crictl"
local -r container_runtime_name="${CONTAINER_RUNTIME_NAME:-docker}"
# We still need to use `docker ps` when container runtime is "docker". This is because
# dockershim is still part of kubelet today. When kubelet is down, crictl pods
# will also fail, and docker will be killed. This is undesirable especially when
# docker live restore is disabled.
local healthcheck_command="docker ps"
if [[ "${CONTAINER_RUNTIME:-docker}" != "docker" ]]; then
healthcheck_command="${crictl} pods"
fi
# Container runtime startup takes time. Make initial attempts before starting
# killing the container runtime.
until timeout 60 ${healthcheck_command} > /dev/null; do
if (( attempt == max_attempts )); then
echo "Max attempt ${max_attempts} reached! Proceeding to monitor container runtime healthiness."
break
fi
echo "$attempt initial attempt \"${healthcheck_command}\"! Trying again in $attempt seconds..."
sleep "$(( 2 ** attempt++ ))"
done
while true; do
if ! timeout 60 ${healthcheck_command} > /dev/null; then
echo "Container runtime ${container_runtime_name} failed!"
if [[ "$container_runtime_name" == "docker" ]]; then
# Dump stack of docker daemon for investigation.
# Log fle name looks like goroutine-stacks-TIMESTAMP and will be saved to
# the exec root directory, which is /var/run/docker/ on Ubuntu and COS.
pkill -SIGUSR1 dockerd
fi
systemctl kill --kill-who=main "${container_runtime_name}"
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 120
else
sleep "${SLEEP_SECONDS}"
fi
done
}
function kubelet_monitoring {
echo "Wait for 2 minutes for kubelet to be functional"
# TODO(andyzheng0831): replace it with a more reliable method if possible.
sleep 120
local -r max_seconds=10
local output=""
while true; do
if ! output=$(curl -m "${max_seconds}" -f -s -S http://127.0.0.1:10255/healthz 2>&1); then
# Print the response and/or errors.
echo $output
echo "Kubelet is unhealthy!"
systemctl kill kubelet
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 60
else
sleep "${SLEEP_SECONDS}"
fi
done
}
############## Main Function ################
if [[ "$#" -ne 1 ]]; then
echo "Usage: health-monitor.sh <container-runtime/kubelet>"
exit 1
fi
KUBE_HOME="/home/kubernetes"
KUBE_ENV="${KUBE_HOME}/kube-env"
if [[ ! -e "${KUBE_ENV}" ]]; then
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
exit 1
fi
SLEEP_SECONDS=10
component=$1
echo "Start kubernetes health monitoring for ${component}"
source "${KUBE_ENV}"
if [[ "${component}" == "container-runtime" ]]; then
container_runtime_monitoring
elif [[ "${component}" == "kubelet" ]]; then
kubelet_monitoring
else
echo "Health monitoring for component "${component}" is not supported!"
fi

View File

@ -1,32 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for GCI distro
# Creates the GCI specific metadata files if they do not exist.
# Assumed var:
#   KUBE_TEMP
function ensure-gci-metadata-files {
if [[ ! -f "${KUBE_TEMP}/gci-update.txt" ]]; then
echo -n "update_disabled" > "${KUBE_TEMP}/gci-update.txt"
fi
if [[ ! -f "${KUBE_TEMP}/gci-ensure-gke-docker.txt" ]]; then
echo -n "true" > "${KUBE_TEMP}/gci-ensure-gke-docker.txt"
fi
if [[ ! -f "${KUBE_TEMP}/gci-docker-version.txt" ]]; then
echo -n "${GCI_DOCKER_VERSION:-}" > "${KUBE_TEMP}/gci-docker-version.txt"
fi
}

View File

@ -1,167 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for GCI distro
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
function create-master-instance {
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
ensure-gci-metadata-files
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
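# Example (hypothetical reserved address name; assumes the write-master-env
# prerequisites listed above have been called):
#   create-master-instance "${MASTER_NAME}-ip"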
function replicate-master-instance() {
local existing_master_zone="${1}"
local existing_master_name="${2}"
local existing_master_replicas="${3}"
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
# Substitute INITIAL_ETCD_CLUSTER_STATE
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER_STATE")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER_STATE: 'existing'")"
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-update-strategy > "${KUBE_TEMP}/gci-update.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-ensure-gke-docker > "${KUBE_TEMP}/gci-ensure-gke-docker.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-docker-version > "${KUBE_TEMP}/gci-docker-version.txt"
get-metadata "${existing_master_zone}" "${existing_master_name}" kube-master-certs > "${KUBE_TEMP}/kube-master-certs.yaml"
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-location > "${KUBE_TEMP}/cluster-location.txt"
create-master-instance-internal "${REPLICA_NAME}"
}
function create-master-instance-internal() {
local gcloud="gcloud"
local retries=5
local sleep_sec=10
if [[ "${MASTER_SIZE##*-}" -ge 64 ]]; then # remove everything up to last dash (inclusive)
# Workaround for #55777
retries=30
sleep_sec=60
fi
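# A quick sketch of the suffix match above (hypothetical machine type):
#   MASTER_SIZE=n1-standard-64  ->  ${MASTER_SIZE##*-}  ->  64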
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
local enable_ip_aliases
if [[ "${NODE_IPAM_MODE:-}" == "CloudAllocator" ]]; then
enable_ip_aliases=true
else
enable_ip_aliases=false
fi
local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
"${address:-}" "${enable_ip_aliases:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},kubelet-config=${KUBE_TEMP}/master-kubelet-config.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh"
metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
metadata="${metadata},gci-update-strategy=${KUBE_TEMP}/gci-update.txt"
metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt"
metadata="${metadata},gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
metadata="${metadata},kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml"
metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
metadata="${metadata},${MASTER_EXTRA_METADATA}"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
for attempt in $(seq 1 ${retries}); do
if result=$(${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \
${preemptible_master} \
${network} 2>&1); then
echo "${result}" >&2
return 0
else
echo "${result}" >&2
if [[ ! "${result}" =~ "try again later" ]]; then
echo "Failed to create master instance due to non-retryable error" >&2
return 1
fi
sleep $sleep_sec
fi
done
echo "Failed to create master instance despite ${retries} attempts" >&2
return 1
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null
}

View File

@ -1,128 +0,0 @@
#cloud-config
write_files:
- path: /etc/systemd/system/kube-master-installation.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /home/kubernetes/bin
ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin
ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin
# Use --retry-connrefused opt only if it's supported by curl.
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh
ExecStart=/home/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-master-configuration.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Configure kubernetes master
After=kube-master-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh
ExecStart=/home/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-container-runtime-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for container runtime
After=kube-master-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubelet-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for kubelet
After=kube-master-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.timer
permissions: 0644
owner: root
content: |
[Unit]
Description=Hourly kube-logrotate invocation
[Timer]
OnCalendar=hourly
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes log rotation
After=kube-master-configuration.service
[Service]
Type=oneshot
ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubernetes.target
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
runcmd:
- systemctl daemon-reload
- systemctl enable kube-master-installation.service
- systemctl enable kube-master-configuration.service
- systemctl enable kube-container-runtime-monitor.service
- systemctl enable kubelet-monitor.service
- systemctl enable kube-logrotate.timer
- systemctl enable kube-logrotate.service
- systemctl enable kubernetes.target
- systemctl start kubernetes.target

View File

@ -1 +0,0 @@
mounter

View File

@ -1,31 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "mounter",
embed = [":go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["mounter.go"],
importpath = "k8s.io/kubernetes/cluster/gce/gci/mounter",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,7 +0,0 @@
## v1 (Thu Oct 20 2016 Vishnu Kannan <vishh@google.com>)
- Creating a container with mount tools pre-installed
- Digest: sha256:9b3c1f04ad6b8947af4eb98f1eff2dc54c5664e3469b4cdf722ec5dd2a1dc064
## v2 (Fri Oct 28 2016 Vishnu Kannan <vishh@google.com>)
- Adding netbase package.
- Digest: sha256:c7dfe059fbbf976fc4284a87eb18adf0f8e0c4cf30a30f5a852842c772a64c2d

View File

@ -1,19 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:xenial
RUN apt-get update && apt-get install -y netbase nfs-common=1:1.2.8-9ubuntu12 glusterfs-client=3.7.6-1ubuntu1
ENTRYPOINT ["/bin/mount"]

View File

@ -1,30 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TAG=v2
REGISTRY=staging-k8s.gcr.io
IMAGE=gci-mounter
all: container
container:
docker build --pull -t ${REGISTRY}/${IMAGE}:${TAG} .
push:
docker push ${REGISTRY}/${IMAGE}:${TAG}
upload:
./stage-upload.sh ${TAG} ${REGISTRY}/${IMAGE}:${TAG}
.PHONY: all container push upload

View File

@ -1,93 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
const (
// Commands and paths used to run mounts inside the chroot environment.
chrootCmd = "chroot"
mountCmd = "mount"
rootfs = "rootfs"
nfsRPCBindErrMsg = "mount.nfs: rpc.statd is not running but is required for remote locking.\nmount.nfs: Either use '-o nolock' to keep locks local, or start statd.\nmount.nfs: an incorrect mount option was specified\n"
rpcBindCmd = "/sbin/rpcbind"
defaultRootfs = "/home/kubernetes/containerized_mounter/rootfs"
)
func main() {
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "Command failed: must provide a command to run.\n")
// Exit nonzero on misuse instead of silently returning success.
os.Exit(1)
}
path, _ := filepath.Split(os.Args[0])
rootfsPath := filepath.Join(path, rootfs)
if _, err := os.Stat(rootfsPath); os.IsNotExist(err) {
rootfsPath = defaultRootfs
}
command := os.Args[1]
switch command {
case mountCmd:
mountErr := mountInChroot(rootfsPath, os.Args[2:])
if mountErr != nil {
fmt.Fprintf(os.Stderr, "Mount failed: %v", mountErr)
os.Exit(1)
}
default:
fmt.Fprintf(os.Stderr, "Unknown command, must be %s", mountCmd)
os.Exit(1)
}
}
// mountInChroot runs mount within a chroot using the given root directory.
func mountInChroot(rootfsPath string, args []string) error {
if _, err := os.Stat(rootfsPath); os.IsNotExist(err) {
return fmt.Errorf("path <%s> does not exist", rootfsPath)
}
args = append([]string{rootfsPath, mountCmd}, args...)
output, err := exec.Command(chrootCmd, args...).CombinedOutput()
if err == nil {
return nil
}
if !strings.EqualFold(string(output), nfsRPCBindErrMsg) {
// Mount failed but not because of RPC bind error
return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %v\nOutput: %s", err, chrootCmd, args, string(output))
}
// Mount failed because it is NFS V3 and we need to run rpcBind
output, err = exec.Command(chrootCmd, rootfsPath, rpcBindCmd, "-w").CombinedOutput()
if err != nil {
return fmt.Errorf("Mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
}
// Rpcbind is running, try mounting again
output, err = exec.Command(chrootCmd, args...).CombinedOutput()
if err != nil {
return fmt.Errorf("Mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
}
return nil
}
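// Example invocation (hypothetical arguments), as kubelet would issue it through
// the containerized mounter wrapper:
//   mounter mount -t nfs 10.0.0.2:/exports /var/lib/kubelet/pods/<pod-uid>/volumes/nfs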

View File

@ -1,67 +0,0 @@
#!/bin/sh
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Due to the GCE custom metadata size limit, we split the entire script into two
# files configure.sh and configure-helper.sh. The functionality of downloading
# kubernetes configuration, manifests, docker images, and binary files are
# put in configure.sh, which is uploaded via GCE custom metadata.
set -o errexit
set -o pipefail
set -o nounset
DOCKER2ACI_VERSION="v0.13.0"
MOUNTER_VERSION=$1
DOCKER_IMAGE=docker://$2
MOUNTER_ACI_IMAGE=gci-mounter-${MOUNTER_VERSION}.aci
MOUNTER_GCS_DIR=gs://kubernetes-release/gci-mounter/
TMPDIR=/tmp
# Setup a working directory
DOWNLOAD_DIR=$(mktemp --tmpdir=${TMPDIR} -d gci-mounter-build.XXXXXXXXXX)
# Setup a staging directory
STAGING_DIR=$(mktemp --tmpdir=${TMPDIR} -d gci-mounter-staging.XXXXXXXXXX)
ACI_DIR=${STAGING_DIR}/gci-mounter
CWD=${PWD}
# Cleanup the temporary directories
function cleanup {
rm -rf ${DOWNLOAD_DIR}
rm -rf ${STAGING_DIR}
cd ${CWD}
}
# Delete temporary directories on exit
trap cleanup EXIT
mkdir ${ACI_DIR}
# Convert docker image to aci and stage it
cd ${DOWNLOAD_DIR}
echo "Downloading docker2aci ${DOCKER2ACI_VERSION}"
wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" &> /dev/null
echo "Extracting docker2aci ${DOCKER2ACI_VERSION}"
tar xzf docker2aci-${DOCKER2ACI_VERSION}.tar.gz
ACI_IMAGE=$(${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci ${DOCKER_IMAGE} 2>/dev/null | tail -n 1)
cp ${ACI_IMAGE} ${ACI_DIR}/${MOUNTER_ACI_IMAGE}
# Upload the contents to gcs
echo "Uploading gci mounter ACI in ${ACI_DIR} to ${MOUNTER_GCS_DIR}"
gsutil cp ${ACI_DIR}/${MOUNTER_ACI_IMAGE} ${MOUNTER_GCS_DIR}
echo "Upload completed"
echo "Updated gci-mounter ACI version and SHA1 in cluster/gce/gci/configure.sh"
echo "${MOUNTER_ACI_IMAGE} hash: $(sha1sum ${ACI_DIR}/${MOUNTER_ACI_IMAGE})"

View File

@ -1,41 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the GCI distro
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
function get-node-instance-metadata {
local metadata=""
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
metadata+="kubelet-config=${KUBE_TEMP}/node-kubelet-config.yaml,"
metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml,"
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,"
metadata+="cluster-location=${KUBE_TEMP}/cluster-location.txt,"
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt,"
metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt,"
metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,"
metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt,"
metadata+="shutdown-script=${KUBE_ROOT}/cluster/gce/gci/shutdown.sh,"
metadata+="${NODE_EXTRA_METADATA}"
echo "${metadata}"
}
# $1: template name (required).
function create-node-instance-template {
local template_name="$1"
ensure-gci-metadata-files
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
}
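Note that create-node-instance-template reads scope_flags from the caller's
scope rather than defining it locally. A minimal sketch of driving it outside
of the normal kube-up flow (all variable values here are illustrative
assumptions, not taken from this file):

    export KUBE_ROOT=${PWD} KUBE_TEMP=/tmp/kube-temp NODE_EXTRA_METADATA=""
    scope_flags=("--scopes" "cloud-platform")
    source "${KUBE_ROOT}/cluster/gce/gci/node-helper.sh"
    create-node-instance-template "my-minion-template"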

View File

@ -1,128 +0,0 @@
#cloud-config
write_files:
- path: /etc/systemd/system/kube-node-installation.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /home/kubernetes/bin
ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin
ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin
# Use --retry-connrefused opt only if it's supported by curl.
ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh'
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh
ExecStart=/home/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-node-configuration.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Configure kubernetes node
After=kube-node-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh
ExecStart=/home/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-container-runtime-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for container runtime
After=kube-node-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubelet-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for kubelet
After=kube-node-configuration.service
[Service]
Restart=always
RestartSec=10
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.timer
permissions: 0644
owner: root
content: |
[Unit]
Description=Hourly kube-logrotate invocation
[Timer]
OnCalendar=hourly
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kube-logrotate.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes log rotation
After=kube-node-configuration.service
[Service]
Type=oneshot
ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf
[Install]
WantedBy=kubernetes.target
- path: /etc/systemd/system/kubernetes.target
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
runcmd:
- systemctl daemon-reload
- systemctl enable kube-node-installation.service
- systemctl enable kube-node-configuration.service
- systemctl enable kube-container-runtime-monitor.service
- systemctl enable kubelet-monitor.service
- systemctl enable kube-logrotate.timer
- systemctl enable kube-logrotate.service
- systemctl enable kubernetes.target
- systemctl start kubernetes.target
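Once this cloud-config has been applied, the resulting unit graph can be
inspected on the node with standard systemd tooling, for example:

    systemctl list-dependencies kubernetes.target
    systemctl status kube-node-installation.service kube-node-configuration.service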

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A script that lets GCI preemptible nodes gracefully terminate in the event of a VM shutdown.
preemptible=$(curl "http://metadata.google.internal/computeMetadata/v1/instance/scheduling/preemptible" -H "Metadata-Flavor: Google")
if [[ "${preemptible}" == "TRUE" ]]; then
echo "Shutting down! Sleeping for 30 seconds to let the node gracefully terminate"
# https://cloud.google.com/compute/docs/instances/stopping-or-deleting-an-instance#delete_timeout
sleep 30
fi

View File

@ -1,97 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Calls gcloud to print out a variety of Google Cloud Platform resources used by
# Kubernetes. Can be run before/after test runs and compared to track leaking
# resources.
# PROJECT must be set in the environment.
# If ZONE, KUBE_GCE_INSTANCE_PREFIX, CLUSTER_NAME, KUBE_GCE_NETWORK, or
# KUBE_GKE_NETWORK is set, they will be used to filter the results.
set -o errexit
set -o nounset
set -o pipefail
ZONE=${ZONE:-}
REGION=${ZONE%-*}
INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-${CLUSTER_NAME:-}}
NETWORK=${KUBE_GCE_NETWORK:-${KUBE_GKE_NETWORK:-}}
# In GKE the instance prefix starts with "gke-".
if [[ "${KUBERNETES_PROVIDER:-}" == "gke" ]]; then
INSTANCE_PREFIX="gke-${CLUSTER_NAME}"
# Truncate to 26 characters for route prefix matching.
INSTANCE_PREFIX="${INSTANCE_PREFIX:0:26}"
fi
# Usage: gcloud-list <group> <resource> [filter] <additional parameters to gcloud...>
# GREP_REGEX is applied to the output of gcloud if set
GREP_REGEX=""
function gcloud-list() {
local -r group=$1
local -r resource=$2
local -r filter=${3:-}
echo -e "\n\n[ ${group} ${resource} ]"
local attempt=1
local result=""
while true; do
if result=$(gcloud ${group} ${resource} list --project=${PROJECT} ${filter:+--filter="$filter"} ${@:4}); then
if [[ ! -z "${GREP_REGEX}" ]]; then
result=$(echo "${result}" | grep "${GREP_REGEX}" || true)
fi
echo "${result}"
return
fi
echo -e "Attempt ${attempt} failed to list ${resource}. Retrying." >&2
attempt=$(($attempt+1))
if [[ ${attempt} -gt 5 ]]; then
echo -e "List ${resource} failed!" >&2
exit 2
fi
sleep $((5*${attempt}))
done
}
echo "Project: ${PROJECT}"
echo "Region: ${REGION}"
echo "Zone: ${ZONE}"
echo "Instance prefix: ${INSTANCE_PREFIX:-}"
echo "Network: ${NETWORK}"
echo "Provider: ${KUBERNETES_PROVIDER:-}"
# List resources related to instances, filtering by the instance prefix if
# provided.
gcloud-list compute instance-templates "name ~ '${INSTANCE_PREFIX}.*'"
gcloud-list compute instance-groups "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-list compute instances "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
# List disk resources, filtering by instance prefix if provided.
gcloud-list compute disks "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
# List network resources. We include names starting with "a", corresponding to
# those that Kubernetes creates.
gcloud-list compute addresses "${REGION:+"region=(${REGION}) AND "}name ~ 'a.*|${INSTANCE_PREFIX}.*'"
# Match either the header or a line with the specified e2e network.
# This assumes that the network name is the second field in the output.
GREP_REGEX="^NAME\|^[^ ]\+[ ]\+\(default\|${NETWORK}\) "
gcloud-list compute routes "name ~ 'default.*|${INSTANCE_PREFIX}.*'"
gcloud-list compute firewall-rules "name ~ 'default.*|k8s-fw.*|${INSTANCE_PREFIX}.*'"
GREP_REGEX=""
gcloud-list compute forwarding-rules ${REGION:+"region=(${REGION})"}
gcloud-list compute target-pools ${REGION:+"region=(${REGION})"}
gcloud-list logging sinks
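A usage sketch: PROJECT is required and the remaining variables are optional
filters (all values below are illustrative). Running the script before and
after a test run and diffing the output highlights leaked resources:

    PROJECT=my-project ZONE=us-central1-b KUBE_GCE_INSTANCE_PREFIX=e2e-test \
      cluster/gce/list-resources.sh > before.txt
    # ... run tests ...
    PROJECT=my-project ZONE=us-central1-b KUBE_GCE_INSTANCE_PREFIX=e2e-test \
      cluster/gce/list-resources.sh > after.txt
    diff before.txt after.txt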

View File

@ -1,44 +0,0 @@
package(default_visibility = ["//visibility:public"])
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
pkg_tar(
name = "gce-master-manifests",
srcs = [":manifests"],
mode = "0644",
)
# if you update this, also update function kube::release::package_kube_manifests_tarball() in build/lib/release.sh
filegroup(
name = "manifests",
srcs = [
"abac-authz-policy.jsonl",
"cluster-autoscaler.manifest",
"e2e-image-puller.manifest",
"etcd.manifest",
"etcd-empty-dir-cleanup.yaml",
"glbc.manifest",
"kms-plugin-container.manifest",
"kube-addon-manager.yaml",
"kube-apiserver.manifest",
"kube-controller-manager.manifest",
"kube-proxy.manifest",
"kube-scheduler.manifest",
"rescheduler.manifest",
] + glob(["internal-*"]),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,7 +0,0 @@
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"{{kube_user}}", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kube_proxy", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubecfg", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"client", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}

View File

@ -1,107 +0,0 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "cluster-autoscaler",
"namespace": "kube-system",
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
},
"annotations": {
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
}
},
"spec": {
"hostNetwork": true,
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.3.0",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
"port": 8085
},
"initialDelaySeconds": 600,
"periodSeconds": 60
},
"command": [
"./run.sh",
"--kubernetes=https://127.0.0.1:443",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
"--balance-similar-node-groups=true",
"{{params}}"
],
"env": [
{
"name": "LOG_OUTPUT",
"value": "/var/log/cluster-autoscaler.log"
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "300Mi"
}
},
"volumeMounts": [
{{cloud_config_mount}}
{
"name": "ssl-certs",
"readOnly": true,
"mountPath": "/etc/ssl/certs"
},
{
"name": "usrsharecacerts",
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "srvkube",
"readOnly": true,
"mountPath": "/etc/srv/kubernetes/cluster-autoscaler"
},
{
"name": "logfile",
"mountPath": "/var/log/cluster-autoscaler.log",
"readOnly": false
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent"
}
],
"volumes": [
{{cloud_config_volume}}
{
"name": "ssl-certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
},
{
"name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"
}
},
{
"name": "srvkube",
"hostPath": {
"path": "/etc/srv/kubernetes/cluster-autoscaler"
}
},
{
"name": "logfile",
"hostPath": {
"path": "/var/log/cluster-autoscaler.log",
"type": "FileOrCreate"
}
}
],
"restartPolicy": "Always"
}
}
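Placeholders such as {{params}} and {{cloud_config_mount}} are substituted by
the master startup scripts before this manifest lands in the static pod
directory; a minimal sketch of that substitution (the sed expression and MIG
value are illustrative, not the exact upstream commands):

    sed -i -e "s@{{params}}@--nodes=1:10:my-mig@g" cluster-autoscaler.manifest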

View File

@ -1,117 +0,0 @@
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
name: e2e-image-puller
namespace: kube-system
labels:
name: e2e-image-puller
spec:
containers:
- name: image-puller
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/busybox:1.24
# TODO: Replace this with a go script that pulls in parallel?
# Currently it takes ~5m to pull all e2e images, so this is OK, and
# fewer moving parts is always better.
# TODO: Replace the hardcoded image list with an autogen list; the list is
# currently hard-coded for static verification. It was generated via:
# grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
# sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
# We always want the subshell to exit 0 so this pod doesn't end up
# blocking tests in an Error state.
command:
- /bin/sh
- -c
- >
for i in
k8s.gcr.io/alpine-with-bash:1.0
k8s.gcr.io/apparmor-loader:0.1
k8s.gcr.io/busybox:1.24
k8s.gcr.io/dnsutils:e2e
k8s.gcr.io/e2e-net-amd64:1.0
k8s.gcr.io/echoserver:1.10
k8s.gcr.io/eptest:0.1
k8s.gcr.io/fakegitserver:0.1
k8s.gcr.io/galera-install:0.1
k8s.gcr.io/invalid-image:invalid-tag
k8s.gcr.io/iperf:e2e
k8s.gcr.io/jessie-dnsutils:e2e
k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
k8s.gcr.io/liveness:e2e
k8s.gcr.io/logs-generator:v0.1.0
k8s.gcr.io/mounttest:0.8
k8s.gcr.io/mounttest-user:0.5
k8s.gcr.io/mysql-galera:e2e
k8s.gcr.io/mysql-healthz:1.0
k8s.gcr.io/netexec:1.4
k8s.gcr.io/netexec:1.5
k8s.gcr.io/netexec:1.7
k8s.gcr.io/nettest:1.7
k8s.gcr.io/nginx:1.7.9
k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/node-problem-detector:v0.3.0
k8s.gcr.io/pause
k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
k8s.gcr.io/portforwardtester:1.2
k8s.gcr.io/redis-install-3.2.0:e2e
k8s.gcr.io/resource_consumer:beta4
k8s.gcr.io/resource_consumer/controller:beta4
gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
gcr.io/kubernetes-e2e-test-images/hostexec-amd64:1.1
k8s.gcr.io/servicelb:0.1
k8s.gcr.io/test-webserver:e2e
k8s.gcr.io/update-demo:kitten
k8s.gcr.io/update-demo:nautilus
gcr.io/kubernetes-e2e-test-images/volume-ceph:0.1
gcr.io/kubernetes-e2e-test-images/volume-gluster:0.2
gcr.io/kubernetes-e2e-test-images/volume-iscsi:0.1
gcr.io/kubernetes-e2e-test-images/volume-nfs:0.8
gcr.io/kubernetes-e2e-test-images/volume-rbd:0.1
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; crictl pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: {{ container_runtime_endpoint }}
name: socket
- mountPath: /usr/bin/crictl
name: crictl
- mountPath: /etc/crictl.yaml
name: config
# Add a container that runs a health-check
- name: nethealth-check
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/kube-nethealth-amd64:1.0
command:
- /bin/sh
- -c
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: {{ container_runtime_endpoint }}
type: Socket
name: socket
- hostPath:
path: /home/kubernetes/bin/crictl
type: File
name: crictl
- hostPath:
path: /etc/crictl.yaml
type: File
name: config
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
hostNetwork: true
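The hard-coded image list above can be regenerated with the command quoted in
the comments, run from the repository root:

    grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
      sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '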

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-empty-dir-cleanup
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: etcd-empty-dir-cleanup
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.18.0

View File

@ -1,104 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server{{ suffix }}",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.18-0') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
}
},
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} {{ etcd_extra_args }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.2.18') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
},
{ "name": "INITIAL_CLUSTER",
"value": "{{ etcd_cluster }}"
},
{ "name": "LISTEN_PEER_URLS",
"value": "{{ etcd_protocol }}://{{ host_ip }}:{{ server_port }}"
},
{ "name": "INITIAL_ADVERTISE_PEER_URLS",
"value": "{{ etcd_protocol }}://{{ hostname }}:{{ server_port }}"
},
{ "name": "ETCD_CREDS",
"value": "{{ etcd_creds }}"
},
{ "name": "ETCD_SNAPSHOT_COUNT",
"value": "10000"
}
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": {{ port }},
"path": "/health"
},
"initialDelaySeconds": {{ liveness_probe_initial_delay }},
"timeoutSeconds": 15
},
"ports": [
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
},
{ "name": "clientport",
"containerPort": {{ port }},
"hostPort": {{ port }}
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{ "name": "varlogetcd",
"mountPath": "/var/log/etcd{{ suffix }}.log",
"readOnly": false
},
{ "name": "etc",
"mountPath": "{{ srv_kube_path }}",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd{{ suffix }}.log",
"type": "FileOrCreate"}
},
{ "name": "etc",
"hostPath": {
"path": "{{ srv_kube_path }}"}
}
]
}}

View File

@ -1,57 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v1.1.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: gcp-lb-controller
version: v1.1.1
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.1.1
livenessProbe:
httpGet:
path: /healthz
port: 8086
scheme: HTTP
initialDelaySeconds: 30
# healthz reaches out to GCE
periodSeconds: 30
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 5
name: l7-lb-controller
volumeMounts:
- mountPath: /etc/gce.conf
name: cloudconfig
readOnly: true
- mountPath: /var/log/glbc.log
name: logfile
readOnly: false
resources:
# Request is set to accommodate this pod alongside the other
# master components on a single core master.
# TODO: Make resource requirements depend on the size of the cluster
requests:
cpu: 10m
memory: 50Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf
type: FileOrCreate
name: cloudconfig
- hostPath:
path: /var/log/glbc.log
type: FileOrCreate
name: logfile

View File

@ -1,8 +0,0 @@
{
"name": "kms-plugin",
"image": "gcr.io/google-containers/k8s-cloud-kms-plugin:v0.1.1",
"command": ["/k8s-cloud-kms-plugin", "--key-uri={{kms_key_uri}}", "--path-to-unix-socket={{kms_path_to_socket}}", "--gce-config={{gce_conf_path}}", "--logtostderr", "2>\&1"],
"livenessProbe": { "httpGet": {"host": "127.0.0.1", "port": 8081, "path": "/healthz"}, "initialDelaySeconds": 3, "timeoutSeconds": 3},
"ports":[{ "name": "healthz", "containerPort": 8081, "hostPort": 8081}, { "name": "metrics", "containerPort": 8082, "hostPort": 8082}],
"volumeMounts": [{{cloud_config_mount}}, {{kms_socket_mount}}]
}

View File

@ -1,39 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
# When updating version also bump it in:
# - test/kubemark/resources/manifests/kube-addon-manager.yaml
image: k8s.gcr.io/kube-addon-manager:v8.6
command:
- /bin/bash
- -c
- exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
volumes:
- hostPath:
path: /etc/kubernetes/
name: addons
- hostPath:
path: /var/log
name: varlog

View File

@ -1,142 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
"component": "kube-apiserver"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{{kms_plugin_container}}
{
"name": "kube-apiserver",
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
"resources": {
"requests": {
"cpu": "250m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 8080,
"path": "/healthz"
},
"initialDelaySeconds": {{liveness_probe_initial_delay}},
"timeoutSeconds": 15
},
"ports":[
{ "name": "https",
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"volumeMounts": [
{{kms_socket_mount}}
{{encryption_provider_mount}}
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{webhook_config_mount}}
{{webhook_authn_config_mount}}
{{audit_policy_config_mount}}
{{audit_webhook_config_mount}}
{{admission_controller_config_mount}}
{{image_policy_webhook_config_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
"readOnly": false},
{ "name": "auditlogfile",
"mountPath": "/var/log/kube-apiserver-audit.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/srv/pki",
"readOnly": true},
{ "name": "srvsshproxy",
"mountPath": "{{srv_sshproxy_path}}",
"readOnly": false}
]
}
],
"volumes":[
{{kms_socket_volume}}
{{encryption_provider_volume}}
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{webhook_config_volume}}
{{webhook_authn_config_volume}}
{{audit_policy_config_volume}}
{{audit_webhook_config_volume}}
{{admission_controller_config_volume}}
{{image_policy_webhook_config_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-apiserver.log",
"type": "FileOrCreate"}
},
{ "name": "auditlogfile",
"hostPath": {
"path": "/var/log/kube-apiserver-audit.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/srv/pki"}
},
{ "name": "srvsshproxy",
"hostPath": {
"path": "{{srv_sshproxy_path}}"}
}
]
}}

View File

@ -1,106 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
"component": "kube-controller-manager"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{pv_recycler_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
{{flexvolume_hostpath_mount}}
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true}
]
}
],
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{pv_recycler_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
},
{{flexvolume_hostpath}}
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log",
"type": "FileOrCreate"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki"}
}
]
}}

View File

@ -1,78 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
namespace: kube-system
# This annotation ensures that kube-proxy does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that kube-proxy runs as a static pod so this annotation does NOT have
# any effect on rescheduler (default scheduler and rescheduler are not
# involved in scheduling kube-proxy).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
tier: node
component: kube-proxy
spec:
priorityClassName: system-node-critical
hostNetwork: true
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
containers:
- name: kube-proxy
image: {{pillar['kube_docker_registry']}}/kube-proxy:{{pillar['kube-proxy_docker_tag']}}
resources:
requests:
cpu: {{ cpurequest }}
command:
- /bin/sh
- -c
- exec kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" --oom-score-adj=-998 {{params}} 1>>/var/log/kube-proxy.log 2>&1
{{container_env}}
{{kube_cache_mutation_detector_env_name}}
{{kube_cache_mutation_detector_env_value}}
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: etc-ssl-certs
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-ca-certs
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
- mountPath: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
readOnly: false
- mountPath: /run/xtables.lock
name: iptableslock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
volumes:
- hostPath:
path: /usr/share/ca-certificates
name: usr-ca-certs
- hostPath:
path: /etc/ssl/certs
name: etc-ssl-certs
- hostPath:
path: /var/lib/kube-proxy/kubeconfig
type: FileOrCreate
name: kubeconfig
- hostPath:
path: /var/log
name: varlog
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: iptableslock
- name: lib-modules
hostPath:
path: /lib/modules

View File

@ -1,65 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
"component": "kube-scheduler"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-scheduler",
"image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
"resources": {
"requests": {
"cpu": "75m"
}
},
"command": [
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10251,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{
"name": "logfile",
"mountPath": "/var/log/kube-scheduler.log",
"readOnly": false
},
{
"name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true
}
]
}
],
"volumes":[
{
"name": "srvkube",
"hostPath": {"path": "{{srv_kube_path}}"}
},
{
"name": "logfile",
"hostPath": {"path": "/var/log/kube-scheduler.log", "type": "FileOrCreate"}
}
]
}}

View File

@ -1,36 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.4.0
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.4.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.4.0
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 10m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
type: FileOrCreate
name: logfile

View File

@ -1,171 +0,0 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !!!EXPERIMENTAL!!! Upgrade a K8s cluster from routes to IP aliases for
# node connectivity on GCE. This is only for migration.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
echo "ERR: KUBERNETES_PROVIDER must be gce" >&2
exit 1
fi
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
# Print the number of routes used for K8s cluster node connectivity.
#
# Assumed vars:
# PROJECT
function get-k8s-node-routes-count() {
local k8s_node_routes_count=$(gcloud compute routes list \
--project=${PROJECT} --filter='description=k8s-node-route' \
--format='value(name)' | wc -l)
echo -n "${k8s_node_routes_count}"
}
# Detect the subnetwork where the K8s cluster resides.
#
# Assumed vars:
# KUBE_MASTER
# PROJECT
# ZONE
# Vars set:
# IP_ALIAS_SUBNETWORK
function detect-k8s-subnetwork() {
local subnetwork_url=$(gcloud compute instances describe \
${KUBE_MASTER} --project=${PROJECT} --zone=${ZONE} \
--format='value(networkInterfaces[0].subnetwork)')
if [[ -n ${subnetwork_url} ]]; then
IP_ALIAS_SUBNETWORK=$(echo ${subnetwork_url##*/})
fi
}
# Set IP_ALIAS_SUBNETWORK's allowSubnetCidrRoutesOverlap to a boolean value.
# $1: true or false for the desired allowSubnetCidrRoutesOverlap.
#
# Assumed vars:
# IP_ALIAS_SUBNETWORK
# GCE_API_ENDPOINT
# PROJECT
# REGION
function set-allow-subnet-cidr-routes-overlap() {
local allow_subnet_cidr_routes_overlap
allow_subnet_cidr_routes_overlap=$(gcloud beta compute networks subnets \
describe ${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \
--format='value(allowSubnetCidrRoutesOverlap)')
local allow_overlap=$1
if [ "${allow_subnet_cidr_routes_overlap,,}" = "${allow_overlap}" ]; then
echo "Subnet ${IP_ALIAS_SUBNETWORK}'s allowSubnetCidrRoutesOverlap is already set as $1"
return
fi
echo "Setting subnet \"${IP_ALIAS_SUBNETWORK}\" allowSubnetCidrRoutesOverlap to $1"
local fingerprint=$(gcloud beta compute networks subnets describe \
${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \
--format='value(fingerprint)')
local access_token=$(gcloud auth print-access-token)
local request="{\"allowSubnetCidrRoutesOverlap\":$1, \"fingerprint\":\"${fingerprint}\"}"
local subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}"
until curl -s --header "Content-Type: application/json" --header "Authorization: Bearer ${access_token}" \
-X PATCH -d "${request}" "${subnetwork_url}" --output /dev/null; do
printf "."
sleep 1
done
}
# Add secondary ranges to K8s subnet.
#
# Assumed vars:
# IP_ALIAS_SUBNETWORK
# PROJECT
# REGION
# CLUSTER_IP_RANGE
# SERVICE_CLUSTER_IP_RANGE
function add-k8s-subnet-secondary-ranges() {
local secondary_ranges=$(gcloud beta compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \
--project="${PROJECT}" --region="${REGION}" \
--format='value(secondaryIpRanges)')
if [[ "${secondary_ranges}" =~ "pods-default" && "${secondary_ranges}" =~ "services-default" ]]; then
echo "${secondary_ranges} already contains both pods-default and services-default secondary ranges"
return
fi
echo "Adding secondary ranges: pods-default (${CLUSTER_IP_RANGE}), services-default (${SERVICE_CLUSTER_IP_RANGE})"
until gcloud beta compute networks subnets update ${IP_ALIAS_SUBNETWORK} \
--project=${PROJECT} --region=${REGION} \
--add-secondary-ranges="pods-default=${CLUSTER_IP_RANGE},services-default=${SERVICE_CLUSTER_IP_RANGE}"; do
printf "."
sleep 1
done
}
# Delete all K8s node routes.
#
# Assumed vars:
# PROJECT
function delete-k8s-node-routes() {
local -a routes
local -r batch=200
routes=( $(gcloud compute routes list \
--project=${PROJECT} --filter='description=k8s-node-route' \
--format='value(name)') )
while (( "${#routes[@]}" > 0 )); do
echo Deleting k8s node routes "${routes[*]::${batch}}"
gcloud compute routes delete --project "${PROJECT}" --quiet "${routes[@]::${batch}}"
routes=( "${routes[@]:${batch}}" )
done
}
detect-project
detect-master
k8s_node_routes_count=$(get-k8s-node-routes-count)
if [[ "${k8s_node_routes_count}" -eq 0 ]]; then
echo "No k8s node routes found and IP alias should already be enabled. Exiting..."
exit 0
fi
echo "Found ${k8s_node_routes_count} K8s node routes. Proceeding to upgrade them to IP aliases based connectivity..."
detect-k8s-subnetwork
if [ -z "${IP_ALIAS_SUBNETWORK:-}" ]; then
echo "No k8s cluster subnetwork found. Exiting..."
exit 1
fi
echo "k8s cluster sits on subnetwork \"${IP_ALIAS_SUBNETWORK}\""
set-allow-subnet-cidr-routes-overlap true
add-k8s-subnet-secondary-ranges
echo "Changing K8s master envs and restarting..."
export KUBE_GCE_IP_ALIAS_SUBNETWORK=${IP_ALIAS_SUBNETWORK}
export KUBE_GCE_NODE_IPAM_MODE="IPAMFromCluster"
export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.2.18-0
export ETCD_VERSION=3.2.18
# Upgrade master with updated kube envs
${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l
delete-k8s-node-routes
set-allow-subnet-cidr-routes-overlap false
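A hedged invocation sketch: the script expects the usual GCE cluster
environment plus the pod and service CIDRs for the new secondary ranges
(all values below are illustrative):

    PROJECT=my-project ZONE=us-central1-b CLUSTER_IP_RANGE=10.64.0.0/14 \
      SERVICE_CLUSTER_IP_RANGE=10.0.0.0/20 cluster/gce/upgrade-aliases.sh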

View File

@ -1,586 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !!!EXPERIMENTAL !!! Upgrade script for GCE. Expect this to get
# rewritten in Go in relatively short order, but it allows us to start
# testing the concepts.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
echo "!!! ${1} only works on GCE" >&2
exit 1
fi
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
function usage() {
echo "!!! EXPERIMENTAL !!!"
echo ""
echo "${0} [-M | -N | -P] [-o] (-l | <version number or publication>)"
echo " Upgrades master and nodes by default"
echo " -M: Upgrade master only"
echo " -N: Upgrade nodes only"
echo " -P: Node upgrade prerequisites only (create a new instance template)"
echo " -c: Upgrade NODE_UPGRADE_PARALLELISM nodes in parallel (default=1) within a single instance group. The MIGs themselves are dealt serially."
echo " -o: Use os distro specified in KUBE_NODE_OS_DISTRIBUTION for new nodes. Options include 'debian' or 'gci'"
echo " -l: Use local(dev) binaries. This is only supported for master upgrades."
echo ""
echo ' Version number or publication is either a proper version number'
echo ' (e.g. "v1.0.6", "v1.2.0-alpha.1.881+376438b69c7612") or a version'
echo ' publication of the form <bucket>/<version> (e.g. "release/stable",'
echo ' "ci/latest-1"). Some common ones are:'
echo ' - "release/stable"'
echo ' - "release/latest"'
echo ' - "ci/latest"'
echo ' See the docs on getting builds for more information about version publication.'
echo ""
echo "(... Fetching current release versions ...)"
echo ""
# NOTE: IF YOU CHANGE THE FOLLOWING LIST, ALSO UPDATE test/e2e/cluster_upgrade.go
local release_stable
local release_latest
local ci_latest
release_stable=$(gsutil cat gs://kubernetes-release/release/stable.txt)
release_latest=$(gsutil cat gs://kubernetes-release/release/latest.txt)
ci_latest=$(gsutil cat gs://kubernetes-release-dev/ci/latest.txt)
echo "Right now, versions are as follows:"
echo " release/stable: ${0} ${release_stable}"
echo " release/latest: ${0} ${release_latest}"
echo " ci/latest: ${0} ${ci_latest}"
}
function print-node-version-info() {
echo "== $1 Node OS and Kubelet Versions =="
"${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o=jsonpath='{range .items[*]}name: "{.metadata.name}", osImage: "{.status.nodeInfo.osImage}", kubeletVersion: "{.status.nodeInfo.kubeletVersion}"{"\n"}{end}'
}
function upgrade-master() {
local num_masters
num_masters=$(get-master-replicas-count)
if [[ "${num_masters}" -gt 1 ]]; then
echo "Upgrade of master not supported if more than one master replica present. The current number of master replicas: ${num_masters}"
exit 1
fi
echo "== Upgrading master to '${SERVER_BINARY_TAR_URL}'. Do not interrupt, deleting master instance. =="
# Tries to figure out KUBE_USER/KUBE_PASSWORD by first looking under
# kubeconfig:username, and then under kubeconfig:username-basic-auth.
# TODO: KUBE_USER is used in generating ABAC policy which the
# apiserver may not have enabled. If it's enabled, we must have a user
# to generate a valid ABAC policy. If the username changes, should
# the script fail? Should we generate a default username and password
# if the section is missing in kubeconfig? Handle this better in 1.5.
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
detect-master
parse-master-env
upgrade-master-env
# Delete the master instance. Note that the master-pd is created
# with auto-delete=no, so it should not be deleted.
gcloud compute instances delete \
--project "${PROJECT}" \
--quiet \
--zone "${ZONE}" \
"${MASTER_NAME}"
create-master-instance "${MASTER_NAME}-ip"
wait-for-master
}
function upgrade-master-env() {
echo "== Upgrading master environment variables. =="
# Generate the node problem detector token if it isn't present on the original
# master.
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" && "${NODE_PROBLEM_DETECTOR_TOKEN:-}" == "" ]]; then
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
fi
}
function wait-for-master() {
echo "== Waiting for new master to respond to API requests =="
local curl_auth_arg
if [[ -n ${KUBE_BEARER_TOKEN:-} ]]; then
curl_auth_arg=(-H "Authorization: Bearer ${KUBE_BEARER_TOKEN}")
elif [[ -n ${KUBE_PASSWORD:-} ]]; then
curl_auth_arg=(--user "${KUBE_USER}:${KUBE_PASSWORD}")
else
echo "can't get auth credentials for the current master"
exit 1
fi
until curl --insecure "${curl_auth_arg[@]}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
printf "."
sleep 2
done
echo "== Done =="
}
# Perform common upgrade setup tasks
#
# Assumed vars
# KUBE_VERSION
function prepare-upgrade() {
kube::util::ensure-temp-dir
detect-project
detect-subnetworks
detect-node-names # sets INSTANCE_GROUPS
write-cluster-location
write-cluster-name
tars_from_version
}
# Reads kube-env metadata from first node in NODE_NAMES.
#
# Assumed vars:
# NODE_NAMES
# PROJECT
# ZONE
function get-node-env() {
# TODO(zmerlynn): Make this more reliable with retries.
gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \
"curl --fail --silent -H 'Metadata-Flavor: Google' \
'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
}
# Read os distro information from /etc/os-release on node.
# $1: The name of node
#
# Assumed vars:
# PROJECT
# ZONE
function get-node-os() {
gcloud compute ssh "$1" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--command \
"cat /etc/os-release | grep \"^ID=.*\" | cut -c 4-"
}
# Assumed vars:
# KUBE_VERSION
# NODE_SCOPES
# NODE_INSTANCE_PREFIX
# PROJECT
# ZONE
#
# Vars set:
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
# EXTRA_DOCKER_OPTS
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
function upgrade-nodes() {
prepare-node-upgrade
do-node-upgrade
}
function setup-base-image() {
if [[ "${env_os_distro}" == "false" ]]; then
echo "== Ensuring that new Node base OS image matched the existing Node base OS image"
NODE_OS_DISTRIBUTION=$(get-node-os "${NODE_NAMES[0]}")
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
# Reset the node image based on current os distro
set-node-image
fi
}
# prepare-node-upgrade creates a new instance template suitable for upgrading
# to KUBE_VERSION and echos a single line with the name of the new template.
#
# Assumed vars:
# KUBE_VERSION
# NODE_SCOPES
# NODE_INSTANCE_PREFIX
# PROJECT
# ZONE
#
# Vars set:
# SANITIZED_VERSION
# INSTANCE_GROUPS
# KUBE_PROXY_TOKEN
# NODE_PROBLEM_DETECTOR_TOKEN
# CA_CERT_BASE64
# EXTRA_DOCKER_OPTS
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
function prepare-node-upgrade() {
echo "== Preparing node upgrade (to ${KUBE_VERSION}). ==" >&2
setup-base-image
SANITIZED_VERSION=$(echo ${KUBE_VERSION} | sed 's/[\.\+]/-/g')
# TODO(zmerlynn): Refactor setting scope flags.
local scope_flags=
if [ -n "${NODE_SCOPES}" ]; then
scope_flags="--scopes ${NODE_SCOPES}"
else
scope_flags="--no-scopes"
fi
# Get required node env vars from existing template.
local node_env=$(get-node-env)
KUBE_PROXY_TOKEN=$(get-env-val "${node_env}" "KUBE_PROXY_TOKEN")
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${node_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
CA_CERT_BASE64=$(get-env-val "${node_env}" "CA_CERT")
EXTRA_DOCKER_OPTS=$(get-env-val "${node_env}" "EXTRA_DOCKER_OPTS")
KUBELET_CERT_BASE64=$(get-env-val "${node_env}" "KUBELET_CERT")
KUBELET_KEY_BASE64=$(get-env-val "${node_env}" "KUBELET_KEY")
upgrade-node-env
# TODO(zmerlynn): How do we ensure kube-env is written in a ${version}-
# compatible way?
write-node-env
# TODO(zmerlynn): Get configure-vm script from ${version}. (Must plumb this
# through all create-node-instance-template implementations).
local template_name=$(get-template-name-from-version ${SANITIZED_VERSION})
create-node-instance-template "${template_name}"
# The following is echo'd so that callers can get the template name.
echo "Instance template name: ${template_name}"
echo "== Finished preparing node upgrade (to ${KUBE_VERSION}). ==" >&2
}
function upgrade-node-env() {
echo "== Upgrading node environment variables. =="
# Get the node problem detector token from master if it isn't present on
# the original node.
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" && "${NODE_PROBLEM_DETECTOR_TOKEN:-}" == "" ]]; then
detect-master
local master_env=$(get-master-env)
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
fi
}
# Upgrades a single node.
# $1: The name of the node
#
# Note: This is called multiple times from do-node-upgrade() in parallel, so should be thread-safe.
function do-single-node-upgrade() {
local -r instance="$1"
local kubectl_rc
local boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
if [[ "${kubectl_rc}" != 0 ]]; then
echo "== FAILED to get bootID ${instance} =="
echo "${boot_id}"
return ${kubectl_rc}
fi
# Drain node
echo "== Draining ${instance}. == " >&2
local drain_rc
"${KUBE_ROOT}/cluster/kubectl.sh" drain --delete-local-data --force --ignore-daemonsets "${instance}" \
&& drain_rc=$? || drain_rc=$?
if [[ "${drain_rc}" != 0 ]]; then
echo "== FAILED to drain ${instance} =="
return ${drain_rc}
fi
# Recreate instance
echo "== Recreating instance ${instance}. ==" >&2
local recreate_rc
local recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
--project="${PROJECT}" \
--zone="${ZONE}" \
--instances="${instance}" 2>&1) && recreate_rc=$? || recreate_rc=$?
if [[ "${recreate_rc}" != 0 ]]; then
echo "== FAILED to recreate ${instance} =="
echo "${recreate}"
return ${recreate_rc}
fi
# Wait for node status to reflect a new boot ID. This guarantees us
# that the node status in the API is from a different boot. This
# does not guarantee that the status is from the upgraded node, but
# it is a best effort approximation.
echo "== Waiting for new node to be added to k8s. ==" >&2
while true; do
local new_boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
if [[ "${kubectl_rc}" != 0 ]]; then
echo "== FAILED to get node ${instance} =="
echo "${boot_id}"
echo " (Will retry.)"
elif [[ "${boot_id}" != "${new_boot_id}" ]]; then
echo "Node ${instance} recreated."
break
else
echo -n .
fi
sleep 1
done
# Wait for the node to have Ready=True.
echo "== Waiting for ${instance} to become ready. ==" >&2
while true; do
local ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
if [[ "${ready}" != 'True' ]]; then
echo "Node ${instance} is still not ready: Ready=${ready}"
else
echo "Node ${instance} Ready=${ready}"
break
fi
sleep 1
done
# Uncordon the node.
echo "== Uncordon ${instance}. == " >&2
local uncordon_rc
"${KUBE_ROOT}/cluster/kubectl.sh" uncordon "${instance}" \
&& uncordon_rc=$? || uncordon_rc=$?
if [[ "${uncordon_rc}" != 0 ]]; then
echo "== FAILED to uncordon ${instance} =="
return ${uncordon_rc}
fi
}
# Prereqs:
# - prepare-node-upgrade should have been called successfully
function do-node-upgrade() {
echo "== Upgrading nodes to ${KUBE_VERSION} with max parallelism of ${node_upgrade_parallelism}. ==" >&2
# Do the actual upgrade.
# NOTE(zmerlynn): If you are changing this gcloud command, update
# test/e2e/cluster_upgrade.go to match this EXACTLY.
local template_name=$(get-template-name-from-version ${SANITIZED_VERSION})
local old_templates=()
local updates=()
for group in ${INSTANCE_GROUPS[@]}; do
old_templates+=($(gcloud compute instance-groups managed list \
--project="${PROJECT}" \
--filter="name ~ '${group}' AND zone:(${ZONE})" \
--format='value(instanceTemplate)' || true))
set_instance_template_out=$(gcloud compute instance-groups managed set-instance-template "${group}" \
--template="${template_name}" \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1) && set_instance_template_rc=$? || set_instance_template_rc=$?
if [[ "${set_instance_template_rc}" != 0 ]]; then
echo "== FAILED to set-instance-template for ${group} to ${template_name} =="
echo "${set_instance_template_out}"
return ${set_instance_template_rc}
fi
instances=()
instances+=($(gcloud compute instance-groups managed list-instances "${group}" \
--format='value(instance)' \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1)) && list_instances_rc=$? || list_instances_rc=$?
if [[ "${list_instances_rc}" != 0 ]]; then
echo "== FAILED to list instances in group ${group} =="
echo "${instances}"
return ${list_instances_rc}
fi
process_count_left=${node_upgrade_parallelism}
pids=()
ret_code_sum=0 # Should stay 0 in the loop iff all parallel node upgrades succeed.
for instance in ${instances[@]}; do
do-single-node-upgrade "${instance}" & pids+=("$!")
# We don't want to run more than ${node_upgrade_parallelism} upgrades at a time,
# so wait once we hit that many nodes. This isn't ideal, since one might take much
# longer than the others, but it should help.
process_count_left=$((process_count_left - 1))
if [[ process_count_left -eq 0 || "${instance}" == "${instances[-1]}" ]]; then
# Wait for each of the parallel node upgrades to finish.
for pid in "${pids[@]}"; do
wait $pid
ret_code_sum=$(( ret_code_sum + $? ))
done
# Return early if at least one of the parallel node upgrades failed.
if [[ ${ret_code_sum} != 0 ]]; then
echo "== Some of the ${node_upgrade_parallelism} parallel node upgrades failed. =="
return ${ret_code_sum}
fi
process_count_left=${node_upgrade_parallelism}
fi
done
done
# Remove the old templates.
echo "== Deleting old templates in ${PROJECT}. ==" >&2
for tmpl in ${old_templates[@]}; do
gcloud compute instance-templates delete \
--quiet \
--project="${PROJECT}" \
"${tmpl}" || true
done
echo "== Finished upgrading nodes to ${KUBE_VERSION}. ==" >&2
}
master_upgrade=true
node_upgrade=true
node_prereqs=false
local_binaries=false
env_os_distro=false
node_upgrade_parallelism=1
while getopts ":MNPlcho" opt; do
case ${opt} in
M)
node_upgrade=false
;;
N)
master_upgrade=false
;;
P)
node_prereqs=true
;;
l)
local_binaries=true
;;
c)
node_upgrade_parallelism=${NODE_UPGRADE_PARALLELISM:-1}
;;
o)
env_os_distro=true
;;
h)
usage
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
exit 1
;;
esac
done
shift $((OPTIND-1))
if [[ $# -gt 1 ]]; then
echo "Error: Only one parameter (<version number or publication>) may be passed after the set of flags!" >&2
usage
exit 1
fi
if [[ $# -lt 1 ]] && [[ "${local_binaries}" == "false" ]]; then
usage
exit 1
fi
if [[ "${master_upgrade}" == "false" ]] && [[ "${node_upgrade}" == "false" ]]; then
echo "Can't specify both -M and -N" >&2
exit 1
fi
# prompt if etcd storage media type isn't set unless using etcd2 when doing master upgrade
if [[ -z "${STORAGE_MEDIA_TYPE:-}" ]] && [[ "${STORAGE_BACKEND:-}" != "etcd2" ]] && [[ "${master_upgrade}" == "true" ]]; then
echo "The default etcd storage media type in 1.6 has changed from application/json to application/vnd.kubernetes.protobuf."
echo "Documentation about the change can be found at https://kubernetes.io/docs/admin/etcd_upgrade."
echo ""
echo "ETCD2 DOES NOT SUPPORT PROTOBUF: If you wish to have to ability to downgrade to etcd2 later application/json must be used."
echo ""
echo "It's HIGHLY recommended that etcd be backed up before this step!!"
echo ""
echo "To enable using json, before running this script set:"
echo "export STORAGE_MEDIA_TYPE=application/json"
echo ""
if [ -t 0 ] && [ -t 1 ]; then
read -p "Would you like to continue with the new default, and lose the ability to downgrade to etcd2? [y/N] " confirm
if [[ "${confirm}" != "y" ]]; then
exit 1
fi
else
echo "To enable using protobuf, before running this script set:"
echo "export STORAGE_MEDIA_TYPE=application/vnd.kubernetes.protobuf"
echo ""
echo "STORAGE_MEDIA_TYPE must be specified when run non-interactively." >&2
exit 1
fi
fi
# Prompt if etcd image/version is unspecified when doing master upgrade.
# In e2e tests, we use TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true to skip this
# prompt, simulating the behavior when the user confirms interactively.
# All other automated use of this script should explicitly specify a version.
if [[ "${master_upgrade}" == "true" ]]; then
if [[ -z "${ETCD_IMAGE:-}" && -z "${TEST_ETCD_IMAGE:-}" ]] || [[ -z "${ETCD_VERSION:-}" && -z "${TEST_ETCD_VERSION:-}" ]]; then
echo
echo "***WARNING***"
echo "Upgrading Kubernetes with this script might result in an upgrade to a new etcd version."
echo "Some etcd version upgrades, such as 3.0.x to 3.1.x, DO NOT offer a downgrade path."
echo "To pin the etcd version to your current one (e.g. v3.0.17), set the following variables"
echo "before running this script:"
echo
echo "# example: pin to etcd v3.0.17"
echo "export ETCD_IMAGE=3.0.17"
echo "export ETCD_VERSION=3.0.17"
echo
echo "Alternatively, if you choose to allow an etcd upgrade that doesn't support downgrade,"
echo "you might still be able to downgrade Kubernetes by pinning to the newer etcd version."
echo "In all cases, it is strongly recommended to have an etcd backup before upgrading."
echo
if [ -t 0 ] && [ -t 1 ]; then
read -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm
if [[ "${confirm}" != "y" ]]; then
exit 1
fi
elif [[ "${TEST_ALLOW_IMPLICIT_ETCD_UPGRADE:-}" != "true" ]]; then
echo "ETCD_IMAGE and ETCD_VERSION must be specified when run non-interactively." >&2
exit 1
fi
fi
fi
print-node-version-info "Pre-Upgrade"
if [[ "${local_binaries}" == "false" ]]; then
set_binary_version ${1}
fi
prepare-upgrade
if [[ "${node_prereqs}" == "true" ]]; then
prepare-node-upgrade
exit 0
fi
if [[ "${master_upgrade}" == "true" ]]; then
upgrade-master
fi
if [[ "${node_upgrade}" == "true" ]]; then
if [[ "${local_binaries}" == "true" ]]; then
echo "Upgrading nodes to local binaries is not yet supported." >&2
exit 1
else
upgrade-nodes
fi
fi
echo "== Validating cluster post-upgrade =="
"${KUBE_ROOT}/cluster/validate-cluster.sh"
print-node-version-info "Post-Upgrade"

File diff suppressed because it is too large Load Diff