vendor update for CSI 0.3.0
44  vendor/k8s.io/kubernetes/cluster/gce/BUILD  (generated, vendored)

@@ -3,17 +3,6 @@ package(default_visibility = ["//visibility:public"])
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

pkg_tar(
    name = "gci-trusty-manifests",
    files = {
        "//cluster/gce/gci/mounter": "gci-mounter",
        "gci/configure-helper.sh": "gci-configure-helper.sh",
        "gci/health-monitor.sh": "health-monitor.sh",
    },
    mode = "0755",
    strip_prefix = ".",
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
@@ -26,37 +15,8 @@ filegroup(
    srcs = [
        ":package-srcs",
        "//cluster/gce/addons:all-srcs",
        "//cluster/gce/gci/mounter:all-srcs",
        "//cluster/gce/gci:all-srcs",
        "//cluster/gce/manifests:all-srcs",
    ],
    tags = ["automanaged"],
)

# Having the COS code from the GCE cluster deploy hosted with the release is
# useful for GKE. This list should match the list in
# kubernetes/release/lib/releaselib.sh.
release_filegroup(
    name = "gcs-release-artifacts",
    srcs = [
        "gci/configure.sh",
        "gci/master.yaml",
        "gci/node.yaml",
    ],
)

pkg_tar(
    name = "gce-master-manifests",
    srcs = [
        "manifests/abac-authz-policy.jsonl",
        "manifests/cluster-autoscaler.manifest",
        "manifests/e2e-image-puller.manifest",
        "manifests/etcd.manifest",
        "manifests/glbc.manifest",
        "manifests/kube-addon-manager.yaml",
        "manifests/kube-apiserver.manifest",
        "manifests/kube-controller-manager.manifest",
        "manifests/kube-proxy.manifest",
        "manifests/kube-scheduler.manifest",
        "manifests/rescheduler.manifest",
    ],
    mode = "0644",
)

2  vendor/k8s.io/kubernetes/cluster/gce/OWNERS  (generated, vendored)

@@ -5,6 +5,7 @@ reviewers:
- vishh
- mwielgus
- MaciekPytel
- jingax10
approvers:
- bowei
- gmarek
@@ -12,3 +13,4 @@ approvers:
- vishh
- mwielgus
- MaciekPytel
- jingax10

@@ -11,7 +11,7 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  - policy
  resourceNames:
  - gce.persistent-volume-binder
  resources:

@@ -1,12 +1,12 @@
apiVersion: extensions/v1beta1
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.persistent-volume-binder
  annotations:
    kubernetes.io/description: 'Policy used by the persistent-volume-binder
      (a.k.a. persistentvolume-controller) to run recycler pods.'
    # TODO: This should use the default seccomp profile.
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile

2  vendor/k8s.io/kubernetes/cluster/gce/addons/podsecuritypolicies/privileged-role.yaml  (generated, vendored)

@@ -7,7 +7,7 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  - policy
  resourceNames:
  - gce.privileged
  resources:

2  vendor/k8s.io/kubernetes/cluster/gce/addons/podsecuritypolicies/privileged.yaml  (generated, vendored)

@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.privileged

@@ -8,7 +8,7 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  - policy
  resourceNames:
  - gce.unprivileged-addon
  resources:

@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.unprivileged-addon
@@ -7,8 +7,8 @@ metadata:
      privilege necessary to run non-privileged kube-system pods. This policy is
      not intended for use outside of kube-system, and may include further
      restrictions in the future.'
    # TODO: Addons should use the default seccomp profile.
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'

15  vendor/k8s.io/kubernetes/cluster/gce/config-common.sh  (generated, vendored)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
@@ -98,6 +98,19 @@ function get-cluster-ip-range {
  echo "${suggested_range}"
}

# Calculate ip alias range based on max number of pods.
# Let pow be the smallest integer which is bigger or equal to log2($1 * 2).
# (32 - pow) will be returned.
#
# $1: The number of max pods limitation.
function get-alias-range-size() {
  for pow in {0..31}; do
    if (( 1 << $pow >= $1 * 2 )); then
      echo $((32 - pow))
      return 0
    fi
  done
}
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"

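A quick worked check of the sizing math in get-alias-range-size above: the function returns the smallest prefix that leaves room for twice the pod limit, so the default MAX_PODS_PER_NODE=110 needs 220 addresses, the smallest power of two >= 220 is 2^8 = 256, and the function prints 32 - 8 = 24, i.e. a /24 per node. A minimal sketch of exercising it (assuming the file is sourced into a plain bash shell):

  source cluster/gce/config-common.sh
  get-alias-range-size 110   # prints 24 -> /24 per node (256 addresses)
  get-alias-range-size 8     # prints 28 -> /28 per node (16 addresses)
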
75  vendor/k8s.io/kubernetes/cluster/gce/config-default.sh  (generated, vendored)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
@@ -74,7 +74,7 @@ fi
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
@@ -84,8 +84,6 @@ CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# MASTER_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
@@ -126,7 +124,6 @@ NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"

VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
REMOUNT_VOLUME_PLUGIN_DIR="${REMOUNT_VOLUME_PLUGIN_DIR:-true}"

SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
@@ -145,7 +142,10 @@ ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}"

# Optional: Enable deploying separate prometheus stack for monitoring kubernetes cluster
ENABLE_PROMETHEUS_MONITORING="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}"

# Optional: Enable Metrics Server. Metrics Server should be enable everywhere,
# since it's a critical component, but in the first release we need a way to disable
@@ -160,9 +160,6 @@ ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"

# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.16-1}"

# One special node out of NUM_NODES would be created of this type if specified.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
@@ -183,6 +180,16 @@ if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi

# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"

# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
fi

# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
#
@@ -195,6 +202,25 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi


# Enable AESGCM encryption of secrets by default.
ENCRYPTION_PROVIDER_CONFIG="${ENCRYPTION_PROVIDER_CONFIG:-}"
if [[ -z "${ENCRYPTION_PROVIDER_CONFIG}" ]]; then
  ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n'
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
    - secrets
    providers:
    - aesgcm:
        keys:
        - name: key1
          secret: $(dd if=/dev/random bs=32 count=1 status=none | base64 | tr -d '\r\n')
EOM
)
fi

# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
@@ -211,6 +237,10 @@ fi
# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"

if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
  RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
fi

# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"

@@ -269,9 +299,16 @@ ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
  # Size of ranges allocated to each node. Currently supports only /32 and /24.
  IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
  # Number of Pods that can run on this node.
  MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
  # Size of ranges allocated to each node.
  IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
  IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
  # If we're using custom network, use the subnet we already create for it as the one for ip-alias.
  # Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network.
  if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
    IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
  fi
  # Reserve the services IP space to avoid being allocated for other GCP resources.
  SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
  NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
@@ -280,6 +317,10 @@ if [ ${ENABLE_IP_ALIASES} = true ]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
  PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
  PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
  # Should not have MAX_PODS_PER_NODE set for route-based clusters.
  echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
  exit 1
fi

# Enable GCE Alpha features.
@@ -327,7 +368,7 @@ NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"

# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"

@@ -362,7 +403,7 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi

# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -373,8 +414,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"

# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"

# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"

# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"

# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"

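Since the ENCRYPTION_PROVIDER_CONFIG default above is stored base64-encoded (so it survives the kube-env metadata round trip), a quick way to inspect the EncryptionConfig that kube-up will hand to the apiserver is simply to decode it again (a sketch, assuming the script has been sourced in a bash shell):

  source cluster/gce/config-default.sh
  echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode
  # -> kind: EncryptionConfig with a freshly generated 32-byte aesgcm key named "key1"
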
77  vendor/k8s.io/kubernetes/cluster/gce/config-test.sh  (generated, vendored)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
@@ -66,13 +66,17 @@ if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
  NODE_ACCELERATORS=""
fi

# To avoid failing large tests due to some flakes in starting nodes, allow
# for a small percentage of nodes to not start during cluster startup.
ALLOWED_NOTREADY_NODES="${ALLOWED_NOTREADY_NODES:-$((NUM_NODES / 100))}"

# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
@@ -83,8 +87,6 @@ CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# MASTER_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
@@ -111,6 +113,10 @@ NODE_IP_RANGE="$(get-node-ip-range)"

RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"

if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
  RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
fi

# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"

@@ -138,7 +144,10 @@ ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}"

# Optional: Enable deploying separate prometheus stack for monitoring kubernetes cluster
ENABLE_PROMETHEUS_MONITORING="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}"

# Optional: Enable Metrics Server. Metrics Server should be enable everywhere,
# since it's a critical component, but in the first release we need a way to disable
@@ -153,14 +162,11 @@ ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"

# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.16-1}"

# One special node out of NUM_NODES would be created of this type if specified.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"

# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.14) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.18-0) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@@ -176,7 +182,6 @@ SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"

VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
REMOUNT_VOLUME_PLUGIN_DIR="${REMOUNT_VOLUME_PLUGIN_DIR:-true}"

TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
@@ -185,14 +190,14 @@ TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"

KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
  NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
  NODE_KUBELET_TEST_ARGS="${NODE_KUBELET_TEST_ARGS:-} --experimental-kernel-memcg-notification=true"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
  MASTER_KUBELET_TEST_ARGS="${MASTER_KUBELET_TEST_ARGS:-} --experimental-kernel-memcg-notification=true"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
  MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --vmodule=httplog=3 --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
@@ -207,6 +212,16 @@ NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"

# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"

# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
fi

# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
@@ -291,9 +306,16 @@ ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ ${ENABLE_IP_ALIASES} = true ]; then
  # Size of ranges allocated to each node. gcloud current supports only /32 and /24.
  IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
  # Number of Pods that can run on this node.
  MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
  # Size of ranges allocated to each node.
  IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
  IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
  # If we're using custom network, use the subnet we already create for it as the one for ip-alias.
  # Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network.
  if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
    IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
  fi
  # Reserve the services IP space to avoid being allocated for other GCP resources.
  SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
  NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
@@ -302,6 +324,10 @@ if [ ${ENABLE_IP_ALIASES} = true ]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
  PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
  PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
  # Should not have MAX_PODS_PER_NODE set for route-based clusters.
  echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
  exit 1
fi

# Enable GCE Alpha features.
@@ -352,7 +378,7 @@ NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"

# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none

# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
@@ -377,6 +403,7 @@ ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false

# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
ADVANCED_AUDIT_LOG_MODE="${ADVANCED_AUDIT_LOG_MODE:-batch}" # batch, blocking

if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
  echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
@@ -392,7 +419,7 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi

# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -403,8 +430,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"

# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"

# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"

# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"

# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"

2  vendor/k8s.io/kubernetes/cluster/gce/delete-stranded-load-balancers.sh  (generated, vendored)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#

71  vendor/k8s.io/kubernetes/cluster/gce/gci/BUILD  (generated, vendored, new file)

@@ -0,0 +1,71 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")

go_test(
    name = "go_default_test",
    srcs = [
        "apiserver_manifest_test.go",
        "configure_helper_test.go",
    ],
    data = [
        ":scripts-test-data",
        "//cluster/gce/manifests",
    ],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
    ],
)

# Having the COS code from the GCE cluster deploy hosted with the release is
# useful for GKE. This list should match the list in
# kubernetes/release/lib/releaselib.sh.
release_filegroup(
    name = "gcs-release-artifacts",
    srcs = [
        "configure.sh",
        "master.yaml",
        "node.yaml",
        "shutdown.sh",
    ],
    visibility = ["//visibility:public"],
)

pkg_tar(
    name = "gci-trusty-manifests",
    srcs = glob(["gke-internal-configure-helper.sh"]),
    files = {
        "//cluster/gce/gci/mounter": "gci-mounter",
        "configure-helper.sh": "gci-configure-helper.sh",
        "health-monitor.sh": "health-monitor.sh",
    },
    mode = "0755",
    strip_prefix = ".",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cluster/gce/gci/mounter:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "scripts-test-data",
    srcs = [
        "configure-helper.sh",
    ],
)

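Note that this new gci/BUILD now owns the gci-trusty-manifests tarball (dropped from the parent BUILD above), which is where configure-helper.sh gets renamed to gci-configure-helper.sh for the release. A sketch of building it directly, assuming a Bazel-configured kubernetes checkout:

  bazel build //cluster/gce/gci:gci-trusty-manifests
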
212  vendor/k8s.io/kubernetes/cluster/gce/gci/apiserver_manifest_test.go  (generated, vendored, new file)

@@ -0,0 +1,212 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gci

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"k8s.io/api/core/v1"
)

const (
	/*
		Template for defining the environment state of configure-helper.sh
		The environment of configure-helper.sh is initially configured via kube-env file. However, as deploy-helper
		executes new variables are created. ManifestTestCase does not care where a variable came from. However, future
		test scenarios, may require such a distinction.
		The list of variables is, by no means, complete - this is what is required to run currently defined tests.
	*/
	deployHelperEnv = `
readonly KUBE_HOME={{.KubeHome}}
readonly KUBE_API_SERVER_LOG_PATH=${KUBE_HOME}/kube-apiserver.log
readonly KUBE_API_SERVER_AUDIT_LOG_PATH=${KUBE_HOME}/kube-apiserver-audit.log
readonly CLOUD_CONFIG_OPT=--cloud-config=/etc/gce.conf
readonly CA_CERT_BUNDLE_PATH=/foo/bar
readonly APISERVER_SERVER_CERT_PATH=/foo/bar
readonly APISERVER_SERVER_KEY_PATH=/foo/bar
readonly APISERVER_CLIENT_CERT_PATH=/foo/bar
readonly CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
readonly CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
readonly KUBE_API_SERVER_DOCKER_TAG=v1.11.0-alpha.0.1808_3c7452dc11645d-dirty
readonly LOG_OWNER_USER=$(whoami)
readonly LOG_OWNER_GROUP=$(id -gn)
ENCRYPTION_PROVIDER_CONFIG={{.EncryptionProviderConfig}}
ENCRYPTION_PROVIDER_CONFIG_PATH={{.EncryptionProviderConfigPath}}
readonly ETCD_KMS_KEY_ID={{.ETCDKMSKeyID}}
`
	kubeAPIServerManifestFileName = "kube-apiserver.manifest"
	kmsPluginManifestFileName     = "kms-plugin-container.manifest"
	kubeAPIServerStartFuncName    = "start-kube-apiserver"

	// Position of containers within a pod manifest
	kmsPluginContainerIndex        = 0
	apiServerContainerIndexNoKMS   = 0
	apiServerContainerIndexWithKMS = 1

	// command": [
	//  "/bin/sh", - Index 0
	//  "-c",      - Index 1
	//  "exec /usr/local/bin/kube-apiserver " - Index 2
	execArgsIndex = 2

	socketVolumeMountIndexKMSPlugin = 1
	socketVolumeMountIndexAPIServer = 0
)

type kubeAPIServerEnv struct {
	KubeHome                     string
	EncryptionProviderConfig     string
	EncryptionProviderConfigPath string
	ETCDKMSKeyID                 string
}

type kubeAPIServerManifestTestCase struct {
	*ManifestTestCase
	apiServerContainer v1.Container
	kmsPluginContainer v1.Container
}

func newKubeAPIServerManifestTestCase(t *testing.T) *kubeAPIServerManifestTestCase {
	return &kubeAPIServerManifestTestCase{
		ManifestTestCase: newManifestTestCase(t, kubeAPIServerManifestFileName, kubeAPIServerStartFuncName, []string{kmsPluginManifestFileName}),
	}
}

func (c *kubeAPIServerManifestTestCase) mustLoadContainers() {
	c.mustLoadPodFromManifest()

	switch len(c.pod.Spec.Containers) {
	case 1:
		c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexNoKMS]
	case 2:
		c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexWithKMS]
		c.kmsPluginContainer = c.pod.Spec.Containers[kmsPluginContainerIndex]
	default:
		c.t.Fatalf("got %d containers in apiserver pod, want 1 or 2", len(c.pod.Spec.Containers))
	}
}

func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv) {
	c.mustInvokeFunc(deployHelperEnv, e)
	c.mustLoadContainers()
}

func getEncryptionProviderConfigFlag(path string) string {
	return fmt.Sprintf("--experimental-encryption-provider-config=%s", path)
}

func TestEncryptionProviderFlag(t *testing.T) {
	c := newKubeAPIServerManifestTestCase(t)
	defer c.tearDown()

	e := kubeAPIServerEnv{
		KubeHome:                     c.kubeHome,
		EncryptionProviderConfig:     base64.StdEncoding.EncodeToString([]byte("FOO")),
		EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
	}

	c.invokeTest(e)

	expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
	execArgs := c.apiServerContainer.Command[execArgsIndex]
	if !strings.Contains(execArgs, expectedFlag) {
		c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
	}
}

func TestEncryptionProviderConfig(t *testing.T) {
	c := newKubeAPIServerManifestTestCase(t)
	defer c.tearDown()

	p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
	e := kubeAPIServerEnv{
		KubeHome:                     c.kubeHome,
		EncryptionProviderConfig:     base64.StdEncoding.EncodeToString([]byte("FOO")),
		EncryptionProviderConfigPath: p,
	}

	c.mustInvokeFunc(deployHelperEnv, e)

	if _, err := os.Stat(p); err != nil {
		c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
	}
}

// TestKMSEncryptionProviderConfig asserts that if ETCD_KMS_KEY_ID is set then start-kube-apiserver will produce
// EncryptionProviderConfig file of type KMS and inject experimental-encryption-provider-config startup flag.
func TestKMSEncryptionProviderConfig(t *testing.T) {
	c := newKubeAPIServerManifestTestCase(t)
	defer c.tearDown()

	e := kubeAPIServerEnv{
		KubeHome:                     c.kubeHome,
		EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
		ETCDKMSKeyID:                 "FOO",
	}

	c.invokeTest(e)

	expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
	execArgs := c.apiServerContainer.Command[execArgsIndex]
	if !strings.Contains(execArgs, expectedFlag) {
		c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
	}

	p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
	if _, err := os.Stat(p); err != nil {
		c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
	}

	d, err := ioutil.ReadFile(p)
	if err != nil {
		c.t.Fatalf("Failed to read encryption provider config %s", p)
	}

	if !strings.Contains(string(d), "name: grpc-kms-provider") {
		c.t.Fatalf("Got %s\n, wanted encryption provider config to be of type grpc-kms", string(d))
	}
}

func TestKMSPluginAndAPIServerSharedVolume(t *testing.T) {
	c := newKubeAPIServerManifestTestCase(t)
	defer c.tearDown()

	var e = kubeAPIServerEnv{
		KubeHome:                     c.kubeHome,
		EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
		ETCDKMSKeyID:                 "FOO",
	}

	c.invokeTest(e)

	k := c.kmsPluginContainer.VolumeMounts[socketVolumeMountIndexKMSPlugin].MountPath
	a := c.apiServerContainer.VolumeMounts[socketVolumeMountIndexAPIServer].MountPath

	if k != a {
		t.Fatalf("Got %s!=%s, wanted KMSPlugin VolumeMount #1:%s to be equal to kube-apiserver VolumeMount #0:%s",
			k, a, k, a)
	}
}

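These tests drive the real start-kube-apiserver function from configure-helper.sh against a scratch KUBE_HOME, so they need the manifest templates and script test data on disk (the data deps declared in the gci/BUILD go_test above). A sketch of running them, assuming a full kubernetes checkout rather than this vendored copy:

  go test ./cluster/gce/gci/ -run TestEncryptionProvider -v
  # or, with a Bazel-configured tree:
  bazel test //cluster/gce/gci:go_default_test
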
601
vendor/k8s.io/kubernetes/cluster/gce/gci/configure-helper.sh
generated
vendored
601
vendor/k8s.io/kubernetes/cluster/gce/gci/configure-helper.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
@ -27,6 +27,8 @@ set -o pipefail
|
||||
|
||||
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
|
||||
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
|
||||
readonly COREDNS_AUTOSCALER="Deployment/coredns"
|
||||
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
|
||||
|
||||
# Use --retry-connrefused opt only if it's supported by curl.
|
||||
CURL_RETRY_CONNREFUSED=""
|
||||
@ -183,6 +185,7 @@ function safe-format-and-mount() {
|
||||
mkdir -p "${mountpoint}"
|
||||
echo "Mounting '${device}' at '${mountpoint}'"
|
||||
mount -o discard,defaults "${device}" "${mountpoint}"
|
||||
chmod a+w "${mountpoint}"
|
||||
}
|
||||
|
||||
# Gets a devices UUID and bind mounts the device to mount location in
|
||||
@ -542,6 +545,9 @@ function create-master-auth {
|
||||
if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
|
||||
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
|
||||
fi
|
||||
if [[ -n "${KUBE_CLUSTER_AUTOSCALER_TOKEN:-}" ]]; then
|
||||
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}," "cluster-autoscaler,uid:cluster-autoscaler"
|
||||
fi
|
||||
if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
|
||||
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
|
||||
fi
|
||||
@ -889,8 +895,9 @@ function create-kubelet-kubeconfig() {
|
||||
echo "Must provide API server address to create Kubelet kubeconfig file!"
|
||||
exit 1
|
||||
fi
|
||||
echo "Creating kubelet kubeconfig file"
|
||||
cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
|
||||
if [[ "${CREATE_BOOTSTRAP_KUBECONFIG:-true}" == "true" ]]; then
|
||||
echo "Creating kubelet bootstrap-kubeconfig file"
|
||||
cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
@ -910,6 +917,13 @@ contexts:
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
elif [[ "${FETCH_BOOTSTRAP_KUBECONFIG:-false}" == "true" ]]; then
|
||||
echo "Fetching kubelet bootstrap-kubeconfig file from metadata"
|
||||
get-metadata-value "instance/attributes/bootstrap-kubeconfig" >/var/lib/kubelet/bootstrap-kubeconfig
|
||||
else
|
||||
echo "Fetching kubelet kubeconfig file from metadata"
|
||||
get-metadata-value "instance/attributes/kubeconfig" >/var/lib/kubelet/kubeconfig
|
||||
fi
|
||||
}
|
||||
|
||||
# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
|
||||
@ -995,6 +1009,30 @@ current-context: kube-scheduler
|
||||
EOF
|
||||
}
|
||||
|
||||
function create-clusterautoscaler-kubeconfig {
|
||||
echo "Creating cluster-autoscaler kubeconfig file"
|
||||
mkdir -p /etc/srv/kubernetes/cluster-autoscaler
|
||||
cat <<EOF >/etc/srv/kubernetes/cluster-autoscaler/kubeconfig
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: cluster-autoscaler
|
||||
user:
|
||||
token: ${KUBE_CLUSTER_AUTOSCALER_TOKEN}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
insecure-skip-tls-verify: true
|
||||
server: https://localhost:443
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: cluster-autoscaler
|
||||
name: cluster-autoscaler
|
||||
current-context: cluster-autoscaler
|
||||
EOF
|
||||
}
|
||||
|
||||
function create-kubescheduler-policy-config {
|
||||
echo "Creating kube-scheduler policy config file"
|
||||
mkdir -p /etc/srv/kubernetes/kube-scheduler
|
||||
@ -1112,7 +1150,8 @@ function start-kubelet {
|
||||
echo "Using kubelet binary at ${kubelet_bin}"
|
||||
|
||||
local -r kubelet_env_file="/etc/default/kubelet"
|
||||
echo "KUBELET_OPTS=\"${KUBELET_ARGS}\"" > "${kubelet_env_file}"
|
||||
local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
|
||||
echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
|
||||
|
||||
# Write the systemd service file for kubelet.
|
||||
cat <<EOF >/etc/systemd/system/kubelet.service
|
||||
@ -1131,6 +1170,7 @@ ExecStart=${kubelet_bin} \$KUBELET_OPTS
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl start kubelet.service
|
||||
}
|
||||
|
||||
@ -1142,13 +1182,18 @@ function start-node-problem-detector {
|
||||
local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
|
||||
# TODO(random-liu): Handle this for alternative container runtime.
|
||||
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
|
||||
local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
|
||||
echo "Using node problem detector binary at ${npd_bin}"
|
||||
local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
|
||||
flags+=" --logtostderr"
|
||||
flags+=" --system-log-monitors=${km_config},${dm_config}"
|
||||
flags+=" --custom-plugin-monitors=${custom_km_config}"
|
||||
flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
|
||||
local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
|
||||
flags+=" --port=${npd_port}"
|
||||
if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
|
||||
flags+=" ${EXTRA_NPD_ARGS}"
|
||||
fi
|
||||
|
||||
# Write the systemd service file for node problem detector.
|
||||
cat <<EOF >/etc/systemd/system/node-problem-detector.service
|
||||
@ -1175,7 +1220,7 @@ EOF
|
||||
function prepare-log-file {
|
||||
touch $1
|
||||
chmod 644 $1
|
||||
chown root:root $1
|
||||
chown "${LOG_OWNER_USER:-root}":"${LOG_OWNER_GROUP:-root}" $1
|
||||
}
|
||||
|
||||
# Prepares parameters for kube-proxy manifest.
|
||||
@ -1195,7 +1240,15 @@ function prepare-kube-proxy-manifest-variables {
|
||||
params+=" --feature-gates=${FEATURE_GATES}"
|
||||
fi
|
||||
if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then
|
||||
params+=" --proxy-mode=ipvs --feature-gates=SupportIPVSProxyMode=true"
|
||||
sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
|
||||
if [[ $? -eq 0 ]];
|
||||
then
|
||||
params+=" --proxy-mode=ipvs"
|
||||
else
|
||||
# If IPVS modules are not present, make sure the node does not come up as
|
||||
# healthy.
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
|
||||
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
|
||||
@ -1209,10 +1262,6 @@ function prepare-kube-proxy-manifest-variables {
|
||||
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
|
||||
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
|
||||
fi
|
||||
local pod_priority=""
|
||||
if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then
|
||||
pod_priority="priorityClassName: system-node-critical"
|
||||
fi
|
||||
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
|
||||
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
|
||||
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
|
||||
@ -1220,7 +1269,6 @@ function prepare-kube-proxy-manifest-variables {
|
||||
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
|
||||
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
|
||||
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
|
||||
sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file}
|
||||
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
|
||||
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
|
||||
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
|
||||
@ -1253,6 +1301,7 @@ function prepare-etcd-manifest {
|
||||
local cluster_state="new"
|
||||
local etcd_protocol="http"
|
||||
local etcd_creds=""
|
||||
local etcd_extra_args="${ETCD_EXTRA_ARGS:-}"
|
||||
|
||||
if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
|
||||
cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
|
||||
@ -1308,6 +1357,7 @@ function prepare-etcd-manifest {
|
||||
fi
|
||||
sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
|
||||
sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
|
||||
sed -i -e "s@{{ *etcd_extra_args *}}@$etcd_extra_args@g" "${temp_file}"
|
||||
if [[ -n "${ETCD_VERSION:-}" ]]; then
|
||||
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
|
||||
else
|
||||
@ -1319,7 +1369,8 @@ function prepare-etcd-manifest {
|
||||
}
|
||||
|
||||
function start-etcd-empty-dir-cleanup-pod {
|
||||
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml" "/etc/kubernetes/manifests"
|
||||
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup.yaml"
|
||||
cp "${src_file}" "/etc/kubernetes/manifests"
|
||||
}
|
||||
|
||||
# Starts etcd server pod (and etcd-events pod if needed).
|
||||
@ -1400,8 +1451,8 @@ function prepare-mounter-rootfs {
|
||||
# DOCKER_REGISTRY
|
||||
function start-kube-apiserver {
|
||||
echo "Start kubernetes api-server"
|
||||
prepare-log-file /var/log/kube-apiserver.log
|
||||
prepare-log-file /var/log/kube-apiserver-audit.log
|
||||
prepare-log-file "${KUBE_API_SERVER_LOG_PATH:-/var/log/kube-apiserver.log}"
|
||||
prepare-log-file "${KUBE_API_SERVER_AUDIT_LOG_PATH:-/var/log/kube-apiserver-audit.log}"
|
||||
|
||||
# Calculate variables and assemble the command line.
|
||||
local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
|
||||
@ -1453,7 +1504,9 @@ function start-kube-apiserver {
|
||||
fi
|
||||
if [[ -n "${NUM_NODES:-}" ]]; then
|
||||
# If the cluster is large, increase max-requests-inflight limit in apiserver.
|
||||
if [[ "${NUM_NODES}" -ge 1000 ]]; then
|
||||
if [[ "${NUM_NODES}" -ge 3000 ]]; then
|
||||
params+=" --max-requests-inflight=3000 --max-mutating-requests-inflight=1000"
|
||||
elif [[ "${NUM_NODES}" -ge 1000 ]]; then
|
||||
params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
|
||||
fi
|
||||
# Set amount of memory available for apiserver based on number of nodes.
|
||||
@ -1515,6 +1568,33 @@ function start-kube-apiserver {
|
||||
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
|
||||
# never restarts. Please manually restart apiserver before this time.
|
||||
params+=" --audit-log-maxsize=2000000000"
|
||||
|
||||
# Batching parameters
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_MODE:-}" ]]; then
|
||||
params+=" --audit-log-mode=${ADVANCED_AUDIT_LOG_MODE}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-}" ]]; then
|
||||
params+=" --audit-log-batch-buffer-size=${ADVANCED_AUDIT_LOG_BUFFER_SIZE}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-}" ]]; then
|
||||
params+=" --audit-log-batch-max-size=${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT:-}" ]]; then
|
||||
params+=" --audit-log-batch-max-wait=${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_THROTTLE_QPS:-}" ]]; then
|
||||
params+=" --audit-log-batch-throttle-qps=${ADVANCED_AUDIT_LOG_THROTTLE_QPS}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_THROTTLE_BURST:-}" ]]; then
|
||||
params+=" --audit-log-batch-throttle-burst=${ADVANCED_AUDIT_LOG_THROTTLE_BURST}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF:-}" ]]; then
|
||||
params+=" --audit-log-initial-backoff=${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF}"
|
||||
fi
|
||||
# Truncating backend parameters
|
||||
if [[ -n "${ADVANCED_AUDIT_TRUNCATING_BACKEND:-}" ]]; then
|
||||
params+=" --audit-log-truncate-enabled=${ADVANCED_AUDIT_TRUNCATING_BACKEND}"
|
||||
fi
|
||||
fi
|
||||
if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
|
||||
params+=" --audit-webhook-mode=batch"
|
||||
@ -1522,6 +1602,14 @@ function start-kube-apiserver {
|
||||
# Create the audit webhook config file, and mount it into the apiserver pod.
|
||||
local -r audit_webhook_config_file="/etc/audit_webhook.config"
|
||||
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
|
||||
create-master-audit-webhook-config "${audit_webhook_config_file}"
|
||||
audit_webhook_config_mount="{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},"
|
||||
audit_webhook_config_volume="{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\", \"type\": \"FileOrCreate\"}},"
|
||||
|
||||
# Batching parameters
|
||||
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MODE:-}" ]]; then
|
||||
params+=" --audit-webhook-mode=${ADVANCED_AUDIT_WEBHOOK_MODE}"
|
||||
fi
|
||||
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
|
||||
params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"
|
||||
fi
|
||||
@ -1538,17 +1626,21 @@ function start-kube-apiserver {
params+=" --audit-webhook-batch-throttle-burst=${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-}" ]]; then
params+=" --audit-webhook-batch-initial-backoff=${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF}"
params+=" --audit-webhook-initial-backoff=${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF}"
fi
# Truncating backend parameters
if [[ -n "${ADVANCED_AUDIT_TRUNCATING_BACKEND:-}" ]]; then
params+=" --audit-webhook-truncate-enabled=${ADVANCED_AUDIT_TRUNCATING_BACKEND}"
fi
create-master-audit-webhook-config "${audit_webhook_config_file}"
audit_webhook_config_mount="{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},"
audit_webhook_config_volume="{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\", \"type\": \"FileOrCreate\"}},"
fi
fi

if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then
params+=" --enable-logs-handler=false"
fi
if [[ "${APISERVER_SET_KUBELET_CA:-false}" == "true" ]]; then
params+=" --kubelet-certificate-authority=${CA_CERT_BUNDLE_PATH}"
fi

local admission_controller_config_mount=""
local admission_controller_config_volume=""
@ -1576,15 +1668,19 @@ function start-kube-apiserver {
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
if [[ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]]; then
params+=" --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
if [[ -n "${PROXY_SSH_USER:-}" ]]; then
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
fi
elif [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(get-metadata-value "instance/network-interfaces/0/access-configs/0/external-ip")
if [[ -n "${PROXY_SSH_USER:-}" ]]; then
params+=" --advertise-address=${vm_external_ip}"
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
fi
elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then
params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
fi

local webhook_authn_config_mount=""
@ -1623,7 +1719,7 @@ function start-kube-apiserver {
local webhook_config_mount=""
local webhook_config_volume=""
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
authorization_mode="Webhook,${authorization_mode}"
authorization_mode="${authorization_mode},Webhook"
params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}},"
@ -1651,15 +1747,31 @@ function start-kube-apiserver {
container_env="\"env\":[{${container_env}}],"
fi

if [[ -n "${ETCD_KMS_KEY_ID:-}" ]]; then
ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n'
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- kms:
name: grpc-kms-provider
cachesize: 1000
endpoint: unix:///var/run/kmsplugin/socket.sock
EOM
)
fi

if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
local encryption_provider_config_path="/etc/srv/kubernetes/encryption-provider-config.yml"
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}"
params+=" --experimental-encryption-provider-config=${encryption_provider_config_path}"
ENCRYPTION_PROVIDER_CONFIG_PATH="${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml}"
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${ENCRYPTION_PROVIDER_CONFIG_PATH}"
params+=" --experimental-encryption-provider-config=${ENCRYPTION_PROVIDER_CONFIG_PATH}"
fi

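# For illustration: the ETCD_KMS_KEY_ID branch above base64-encodes the
# EncryptionConfig so it can travel as a single env value, and the block that
# follows decodes it back to a file. A minimal sketch of the round trip, using
# the same heredoc shape (values abbreviated, not part of the diff):
#
#   config=$(cat << EOM | base64 | tr -d '\r\n'
#   kind: EncryptionConfig
#   apiVersion: v1
#   EOM
#   )
#   echo "${config}" | base64 --decode   # prints the original YAML back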
src_file="${src_dir}/kube-apiserver.manifest"
# Evaluate variables.
local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
@ -1686,7 +1798,68 @@ function start-kube-apiserver {
sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests

if [[ -z "${ETCD_KMS_KEY_ID:-}" ]]; then
# Removing KMS related placeholders.
sed -i -e " {
s@{{kms_plugin_container}}@@

s@{{kms_socket_mount}}@@
s@{{encryption_provider_mount}}@@

s@{{kms_socket_volume}}@@
s@{{encryption_provider_volume}}@@
} " "${src_file}"
else
local kms_plugin_src_file="${src_dir}/kms-plugin-container.manifest"

if [[ ! -f "${kms_plugin_src_file}" ]]; then
echo "Error: KMS Integration was requested, but ${kms_plugin_src_file} is missing."
exit 1
fi

if [[ ! -f "${ENCRYPTION_PROVIDER_CONFIG_PATH}" ]]; then
echo "Error: KMS Integration was requested, but ${ENCRYPTION_PROVIDER_CONFIG_PATH} is missing."
exit 1
fi

# TODO: Validate that the encryption config is for KMS.

local kms_socket_dir="/var/run/kmsplugin"

# kms_socket_mnt is used by both kms_plugin and kube-apiserver - this is how these containers talk.
local kms_socket_mnt="{ \"name\": \"kmssocket\", \"mountPath\": \"${kms_socket_dir}\", \"readOnly\": false}"

local kms_socket_vol="{ \"name\": \"kmssocket\", \"hostPath\": {\"path\": \"${kms_socket_dir}\", \"type\": \"DirectoryOrCreate\"}}"
local kms_path_to_socket="${kms_socket_dir}/socket.sock"

local encryption_provider_mnt="{ \"name\": \"encryptionconfig\", \"mountPath\": \"${ENCRYPTION_PROVIDER_CONFIG_PATH}\", \"readOnly\": true}"
local encryption_provider_vol="{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${ENCRYPTION_PROVIDER_CONFIG_PATH}\", \"type\": \"File\"}}"

# TODO these are used in other places, convert to global.
local gce_conf_path="/etc/gce.conf"
local cloud_config_mount="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}"

local kms_plugin_container=$(echo $(sed " {
s@{{kms_key_uri}}@${ETCD_KMS_KEY_ID}@
s@{{gce_conf_path}}@${gce_conf_path}@
s@{{kms_path_to_socket}}@${kms_path_to_socket}@
s@{{kms_socket_mount}}@${kms_socket_mnt}@
s@{{cloud_config_mount}}@${cloud_config_mount}@
} " "${kms_plugin_src_file}") | tr "\n" "\\n")

sed -i -e " {
s@{{kms_plugin_container}}@${kms_plugin_container},@

s@{{kms_socket_mount}}@${kms_socket_mnt},@
s@{{encryption_provider_mount}}@${encryption_provider_mnt},@

s@{{kms_socket_volume}}@${kms_socket_vol},@
s@{{encryption_provider_volume}}@${encryption_provider_vol},@
} " "${src_file}"
fi

cp "${src_file}" "${ETC_MANIFESTS:-/etc/kubernetes/manifests}"
}

# Starts kubernetes controller manager.
@ -1759,6 +1932,9 @@ function start-kube-controller-manager {
params+=" --pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE"
params+=" --pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE"
fi
if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
params+=" --controllers=${RUN_CONTROLLERS}"
fi

local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
local container_env=""
@ -1830,12 +2006,15 @@ function start-kube-scheduler {
function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
setup-addon-manifests "addons" "rbac/cluster-autoscaler"
create-clusterautoscaler-kubeconfig
prepare-log-file /var/log/cluster-autoscaler.log

# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"

local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
params+=" --kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@ -1893,6 +2072,20 @@ function download-extra-addons {
"${curl_cmd[@]}"
}

# A function that fetches a GCE metadata value and echoes it out.
#
# $1: URL path after /computeMetadata/v1/ (without leading slash).
function get-metadata-value {
curl \
--retry 5 \
--retry-delay 3 \
${CURL_RETRY_CONNREFUSED} \
--fail \
--silent \
-H 'Metadata-Flavor: Google' \
"http://metadata/computeMetadata/v1/${1}"
}
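# For illustration: the helper takes the path after /computeMetadata/v1/, as in
# the advertise-address logic earlier in this file:
#
#   vm_external_ip=$(get-metadata-value "instance/network-interfaces/0/access-configs/0/external-ip")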

# A helper function for copying manifests and setting dir/files
# permissions.
#
@ -1974,10 +2167,25 @@ function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}

# Update {{ container-runtime }} with actual container runtime name.
# Update {{ container-runtime }} with actual container runtime name,
# and {{ container-runtime-endpoint }} with actual container runtime
# endpoint.
function update-container-runtime {
local -r file="$1"
local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
sed -i \
-e "s@{{ *container_runtime *}}@${CONTAINER_RUNTIME_NAME:-docker}@g" \
-e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
"${file}"
}

# Remove configuration in yaml file if node journal is not enabled.
function update-node-journal {
local -r configmap_yaml="$1"
sed -i -e "s@{{ *container_runtime *}}@${CONTAINER_RUNTIME_NAME:-docker}@g" "${configmap_yaml}"
if [[ "${ENABLE_NODE_JOURNAL:-}" != "true" ]]; then
# Removes all lines between two patterns (throws away node-journal)
sed -i -e "/# BEGIN_NODE_JOURNAL/,/# END_NODE_JOURNAL/d" "${configmap_yaml}"
fi
}

# Updates parameters in yaml file for prometheus-to-sd configuration, or
@ -1994,23 +2202,60 @@ function update-prometheus-to-sd-parameters {

# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
sed -i -e "s@{{ *event_exporter_zone *}}@${ZONE:-}@g" "$1"
local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
sed -i -e "s@{{ exporter_sd_resource_model }}@${stackdriver_resource_model}@g" "$1"
}

function update-dashboard-controller {
if [ -n "${CUSTOM_KUBE_DASHBOARD_BANNER:-}" ]; then
sed -i -e "s@\( \+\)# PLATFORM-SPECIFIC ARGS HERE@\1- --system-banner=${CUSTOM_KUBE_DASHBOARD_BANNER}\n\1- --system-banner-severity=WARNING@" "$1"
fi
}

# Sets up the manifests of coreDNS for k8s addons.
function setup-coredns-manifest {
local -r coredns_file="${dst_dir}/dns/coredns.yaml"
mv "${dst_dir}/dns/coredns.yaml.in" "${coredns_file}"
local -r coredns_file="${dst_dir}/dns/coredns/coredns.yaml"
mv "${dst_dir}/dns/coredns/coredns.yaml.in" "${coredns_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"

if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
sed -i'' -e "s@{{.Target}}@${COREDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}

# Sets up the manifests of Fluentd configmap and yamls for k8s addons.
function setup-fluentd {
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "old".
if [[ "${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" == "new" ]]; then
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config"
else
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap-old.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
}

# Sets up the manifests of kube-dns for k8s addons.
function setup-kube-dns-manifest {
local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml"
mv "${dst_dir}/dns/kube-dns.yaml.in" "${kubedns_file}"
local -r kubedns_file="${dst_dir}/dns/kube-dns/kube-dns.yaml"
mv "${dst_dir}/dns/kube-dns/kube-dns.yaml.in" "${kubedns_file}"
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
@ -2024,6 +2269,38 @@ EOF

if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
sed -i'' -e "s@{{.Target}}@${KUBEDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}

# Sets up the manifests of netd for k8s addons.
function setup-netd-manifest {
local -r netd_file="${dst_dir}/netd/netd.yaml"
mkdir -p "${dst_dir}/netd"
touch "${netd_file}"
if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
# Replace with custom GCP netd deployment.
cat > "${netd_file}" <<EOF
$(echo "$CUSTOM_NETD_YAML")
EOF
fi
}

# A helper function to set up a custom yaml for a k8s addon.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: manifest file
# $4: custom yaml
function setup-addon-custom-yaml {
local -r manifest_path="/etc/kubernetes/$1/$2/$3"
local -r custom_yaml="$4"
if [ -n "${custom_yaml:-}" ]; then
# Replace with custom manifest.
cat > "${manifest_path}" <<EOF
$custom_yaml
EOF
fi
}

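# For illustration, the calling convention used later in this file for the
# Calico manifests:
#
#   setup-addon-custom-yaml "addons" "calico-policy-controller" \
#     "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"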
@ -2060,6 +2337,11 @@ EOF
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
fi
# Setup prometheus stack for monitoring kubernetes cluster
if [[ "${ENABLE_PROMETHEUS_MONITORING:-}" == "true" ]]; then
setup-addon-manifests "addons" "prometheus"
fi
# Setup cluster monitoring using heapster
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
@ -2089,6 +2371,7 @@ EOF
fi

sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ cluster_location }}@${ZONE}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
@ -2108,15 +2391,17 @@ EOF
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] ||
([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]] &&
[[ "${METADATA_AGENT_VERSION:-}" != "" ]]; then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
metadata_agent_cluster_level_cpu_request="${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-40m}"
metadata_agent_cluster_level_memory_request="${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-50Mi}"
setup-addon-manifests "addons" "metadata-agent/stackdriver"
daemon_set_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
sed -i -e "s@{{ metadata_agent_version }}@${METADATA_AGENT_VERSION}@g" "${daemon_set_yaml}"
sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${daemon_set_yaml}"
sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${daemon_set_yaml}"
metadata_agent_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_cluster_level_cpu_request }}@${metadata_agent_cluster_level_cpu_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_cluster_level_memory_request }}@${metadata_agent_cluster_level_memory_request}@g" "${metadata_agent_yaml}"
fi
fi
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
@ -2126,13 +2411,17 @@ EOF
setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns"
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns/coredns"
setup-coredns-manifest
else
setup-addon-manifests "addons" "dns/kube-dns"
setup-kube-dns-manifest
fi
fi
if [[ "${ENABLE_NETD:-}" == "true" ]]; then
setup-netd-manifest
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
@ -2143,19 +2432,15 @@ EOF
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
setup-fluentd ${dst_dir}
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
update-event-exporter ${event_exporter_yaml}
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${event_exporter_yaml}
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
local -r dashboard_controller_yaml="${dst_dir}/dashboard/dashboard-controller.yaml"
update-dashboard-controller ${dashboard_controller_yaml}
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
setup-addon-manifests "addons" "node-problem-detector"
@ -2170,6 +2455,9 @@ EOF
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"

setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"

# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
@ -2204,8 +2492,9 @@ EOF
# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest" \
/etc/kubernetes/manifests/
local -r e2e_image_puller_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
update-container-runtime "${e2e_image_puller_manifest}"
cp "${e2e_image_puller_manifest}" /etc/kubernetes/manifests/
}

# Sets up manifests for the ingress controller and gce-specific policies for the service controller.
@ -2218,11 +2507,19 @@ function start-lb-controller {
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"

local -r glbc_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
if [[ ! -z "${GCE_GLBC_IMAGE:-}" ]]; then
sed -i "s@image:.*@image: ${GCE_GLBC_IMAGE}@" "${glbc_manifest}"
local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"

if [[ -n "${CUSTOM_INGRESS_YAML:-}" ]]; then
echo "${CUSTOM_INGRESS_YAML}" > "${dest_manifest}"
else
cp "${src_manifest}" "${dest_manifest}"
fi

# Override the glbc image if GCE_GLBC_IMAGE is specified.
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
sed -i "s|image:.*|image: ${GCE_GLBC_IMAGE}|" "${dest_manifest}"
fi
cp "${glbc_manifest}" /etc/kubernetes/manifests/
fi
}

@ -2243,6 +2540,15 @@ function setup-kubelet-dir {
mount -B -o remount,exec,suid,dev /var/lib/kubelet
}

# Override for GKE custom master setup scripts (no-op outside of GKE).
function gke-master-start {
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
echo "Running GKE internal configuration script"
. "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
gke-internal-master-start
fi
}

function reset-motd {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
@ -2282,6 +2588,16 @@ EOF
function override-kubectl {
echo "overriding kubectl"
echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
# Add ${KUBE_HOME}/bin into sudoer secure path.
local sudo_path
sudo_path=$(sudo env | grep "^PATH=")
if [[ -n "${sudo_path}" ]]; then
sudo_path=${sudo_path#PATH=}
(
umask 027
echo "Defaults secure_path=\"${KUBE_HOME}/bin:${sudo_path}\"" > /etc/sudoers.d/kube_secure_path
)
fi
}

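# For illustration, the subshell above emits a single sudoers line; with an
# assumed stock PATH (illustrative, not from this diff) it would read roughly:
#
#   Defaults secure_path="/home/kubernetes/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"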
function override-pv-recycler {
@ -2319,89 +2635,106 @@ EOF
}

########### Main Function ###########
echo "Start to configure instance for kubernetes"
function main() {
echo "Start to configure instance for kubernetes"

KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"

if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi

source "${KUBE_HOME}/kube-env"

if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
source "${KUBE_HOME}/kube-master-certs"
fi

if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi
fi

# generate the controller manager and scheduler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
source "${KUBE_HOME}/kube-env"

setup-os-params
config-ip-firewall
create-dirs
setup-kubelet-dir
ensure-local-ssds
setup-logrotate
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
mount-master-pd
create-node-pki
create-master-pki
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
override-pv-recycler

if [[ -f "${KUBE_HOME}/kubelet-config.yaml" ]]; then
echo "Found Kubelet config file at ${KUBE_HOME}/kubelet-config.yaml"
KUBELET_CONFIG_FILE_ARG="--config ${KUBE_HOME}/kubelet-config.yaml"
fi

if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
source "${KUBE_HOME}/kube-master-certs"
fi

if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
exit 1
fi
fi

# generate the controller manager, scheduler and cluster autoscaler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
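# For illustration, each token above is 32 characters drawn from the base64
# alphabet minus "=+/": 128 random bytes are encoded, padding and symbol
# characters are stripped, and the trailing dd truncates to 32 bytes. An
# equivalent sketch (not part of the diff):
#
#   head -c 128 /dev/urandom | base64 | tr -d "=+/" | head -c 32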

setup-os-params
config-ip-firewall
create-dirs
setup-kubelet-dir
ensure-local-ssds
setup-logrotate
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
mount-master-pd
create-node-pki
create-master-pki
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
override-pv-recycler
gke-master-start
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-kubeproxy-user-kubeconfig
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
create-node-problem-detector-kubeconfig
fi
fi

override-kubectl
# Run the containerized mounter once to pre-cache the container image.
if [[ "${CONTAINER_RUNTIME:-docker}" == "docker" ]]; then
assemble-docker-flags
fi
start-kubelet

if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
compute-master-manifest-variables
start-etcd-servers
start-etcd-empty-dir-cleanup-pod
start-kube-apiserver
start-kube-controller-manager
start-kube-scheduler
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi
fi
reset-motd
prepare-mounter-rootfs
modprobe configs
echo "Done for the configuration for kubernetes"
}

# use --source-only to test functions defined in this script.
if [[ "$#" -eq 1 && "${1}" == "--source-only" ]]; then
:
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-kubeproxy-user-kubeconfig
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
create-node-problem-detector-kubeconfig
fi
main "${@}"
fi

override-kubectl
# Run the containerized mounter once to pre-cache the container image.
if [[ "${CONTAINER_RUNTIME:-docker}" == "docker" ]]; then
assemble-docker-flags
fi
start-kubelet

if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
compute-master-manifest-variables
start-etcd-servers
start-etcd-empty-dir-cleanup-pod
start-kube-apiserver
start-kube-controller-manager
start-kube-scheduler
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi
fi
reset-motd
prepare-mounter-rootfs
modprobe configs
echo "Done for the configuration for kubernetes"

135
vendor/k8s.io/kubernetes/cluster/gce/gci/configure.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
@ -25,9 +25,11 @@ set -o pipefail

### Hardcoded constants
DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.4.1"
DEFAULT_NPD_SHA1="a57a3fe64cab8a18ec654f5cef0aec59dae62568"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.5.0"
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
DEFAULT_CRICTL_VERSION="v1.11.0"
DEFAULT_CRICTL_SHA1="8f5142b985d314cdebb51afd55054d5ec00c442a"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###

@ -54,37 +56,59 @@ EOF

function download-kube-env {
# Fetch kube-env from GCE metadata server.
(umask 700;
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
(
umask 077
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env")
rm -f "${tmp_kube_env}"
rm -f "${tmp_kube_env}"
)
}
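# For illustration, the python one-liner above turns each kube-env YAML entry
# into a readonly shell assignment (values assumed for the example):
#
#   KUBERNETES_MASTER: 'true'         # line in the fetched kube-env YAML
#   readonly KUBERNETES_MASTER=true   # resulting line in ${KUBE_HOME}/kube-env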

function download-kubelet-config {
local -r dest="$1"
echo "Downloading Kubelet config file, if it exists"
# Fetch kubelet config file from GCE metadata server.
(
umask 077
local -r tmp_kubelet_config="/tmp/kubelet-config.yaml"
if curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kubelet_config}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kubelet-config; then
# only write to the final location if curl succeeds
mv "${tmp_kubelet_config}" "${dest}"
elif [[ "${REQUIRE_METADATA_KUBELET_CONFIG_FILE:-false}" == "true" ]]; then
echo "== Failed to download required Kubelet config file from metadata server =="
exit 1
fi
)
}

function download-kube-master-certs {
# Fetch kube-env from GCE metadata server.
(umask 700;
local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_master_certs}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
(
umask 077
local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_master_certs}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs")
rm -f "${tmp_kube_master_certs}"
rm -f "${tmp_kube_master_certs}"
)
}

@ -175,15 +199,15 @@ function install-node-problem-detector {
local -r npd_version="${DEFAULT_NPD_VERSION}"
local -r npd_sha1="${DEFAULT_NPD_SHA1}"
fi
local -r npd_tar="node-problem-detector-${npd_version}.tar.gz"

if is-preloaded "node-problem-detector" "${npd_sha1}"; then
if is-preloaded "${npd_tar}" "${npd_sha1}"; then
echo "node-problem-detector is preloaded."
return
fi

echo "Downloading node problem detector."
local -r npd_release_path="https://storage.googleapis.com/kubernetes-release"
local -r npd_tar="node-problem-detector-${npd_version}.tar.gz"
download-or-bust "${npd_sha1}" "${npd_release_path}/node-problem-detector/${npd_tar}"
local -r npd_dir="${KUBE_HOME}/node-problem-detector"
mkdir -p "${npd_dir}"
@ -212,6 +236,47 @@ function install-cni-binaries {
rm -f "${KUBE_HOME}/${cni_tar}"
}

# Install crictl binary.
function install-crictl {
if [[ -n "${CRICTL_VERSION:-}" ]]; then
local -r crictl_version="${CRICTL_VERSION}"
local -r crictl_sha1="${CRICTL_TAR_HASH}"
else
local -r crictl_version="${DEFAULT_CRICTL_VERSION}"
local -r crictl_sha1="${DEFAULT_CRICTL_SHA1}"
fi
local -r crictl="crictl-${crictl_version}-linux-amd64"

if is-preloaded "${crictl}" "${crictl_sha1}"; then
echo "crictl is preloaded"
return
fi

echo "Downloading crictl"
local -r crictl_path="https://storage.googleapis.com/kubernetes-release/crictl"
download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
chmod a+x "${KUBE_BIN}/crictl"

# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
}
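# For illustration: with /etc/crictl.yaml in place, crictl needs no endpoint
# flag. The health monitor later in this diff relies on exactly this:
#
#   ${KUBE_HOME}/bin/crictl pods   # lists pod sandboxes via the configured endpoint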

function install-exec-auth-plugin {
if [[ ! "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then
return
fi
local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}"
local -r plugin_sha1="${EXEC_AUTH_PLUGIN_SHA1}"

echo "Downloading gke-exec-auth-plugin binary"
download-or-bust "${plugin_sha1}" "${plugin_url}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
}

function install-kube-manifests {
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
local dst_dir="${KUBE_HOME}/kube-manifests"
@ -242,6 +307,10 @@ function install-kube-manifests {
xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" ]]; then
cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" "${KUBE_BIN}/"
fi

cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"

rm -f "${KUBE_HOME}/${manifests_tar}"
@ -348,6 +417,13 @@ function install-kube-binary-config {
remount-flexvolume-directory "${VOLUME_PLUGIN_DIR}"
fi

# Install crictl on each node.
install-crictl

if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
install-exec-auth-plugin
fi

# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
@ -356,13 +432,24 @@ function install-kube-binary-config {

######### Main Function ##########
echo "Start to install kubernetes files"
# if install fails, message-of-the-day (motd) will warn at login shell
set-broken-motd

KUBE_HOME="/home/kubernetes"
KUBE_BIN="${KUBE_HOME}/bin"

# download and source kube-env
download-kube-env
source "${KUBE_HOME}/kube-env"

download-kubelet-config "${KUBE_HOME}/kubelet-config.yaml"

# master certs
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
download-kube-master-certs
fi

# binaries and kube-system manifests
install-kube-binary-config

echo "Done for installing kubernetes files"

172
vendor/k8s.io/kubernetes/cluster/gce/gci/configure_helper_test.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gci

import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"text/template"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)

const (
envScriptFileName = "kube-env"
configureHelperScriptName = "configure-helper.sh"
)

type ManifestTestCase struct {
pod v1.Pod
envScriptPath string
manifest string
auxManifests []string
kubeHome string
manifestSources string
manifestDestination string
manifestTemplateDir string
manifestTemplate string
manifestFuncName string
t *testing.T
}

func newManifestTestCase(t *testing.T, manifest, funcName string, auxManifests []string) *ManifestTestCase {
c := &ManifestTestCase{
t: t,
manifest: manifest,
auxManifests: auxManifests,
manifestFuncName: funcName,
}

d, err := ioutil.TempDir("", "configure-helper-test")
if err != nil {
c.t.Fatalf("Failed to create temp directory: %v", err)
}

c.kubeHome = d
c.envScriptPath = filepath.Join(c.kubeHome, envScriptFileName)
c.manifestSources = filepath.Join(c.kubeHome, "kube-manifests", "kubernetes", "gci-trusty")

currentPath, err := os.Getwd()
if err != nil {
c.t.Fatalf("Failed to get current directory: %v", err)
}
gceDir := filepath.Dir(currentPath)
c.manifestTemplateDir = filepath.Join(gceDir, "manifests")
c.manifestTemplate = filepath.Join(c.manifestTemplateDir, c.manifest)
c.manifestDestination = filepath.Join(c.kubeHome, "etc", "kubernetes", "manifests", c.manifest)

c.mustCopyFromTemplate()
c.mustCopyAuxFromTemplate()
c.mustCreateManifestDstDir()

return c
}

func (c *ManifestTestCase) mustCopyFromTemplate() {
if err := os.MkdirAll(c.manifestSources, os.ModePerm); err != nil {
c.t.Fatalf("Failed to create source directory: %v", err)
}

if err := copyFile(c.manifestTemplate, filepath.Join(c.manifestSources, c.manifest)); err != nil {
c.t.Fatalf("Failed to copy source manifest to KUBE_HOME: %v", err)
}
}

func (c *ManifestTestCase) mustCopyAuxFromTemplate() {
for _, m := range c.auxManifests {
err := copyFile(filepath.Join(c.manifestTemplateDir, m), filepath.Join(c.manifestSources, m))
if err != nil {
c.t.Fatalf("Failed to copy source manifest %s to KUBE_HOME: %v", m, err)
}
}
}

func (c *ManifestTestCase) mustCreateManifestDstDir() {
p := filepath.Join(c.kubeHome, "etc", "kubernetes", "manifests")
if err := os.MkdirAll(p, os.ModePerm); err != nil {
c.t.Fatalf("Failed to create destination folder for kube-apiserver.manifest: %v", err)
}
}

func (c *ManifestTestCase) mustCreateEnv(envTemplate string, env interface{}) {
f, err := os.Create(filepath.Join(c.kubeHome, envScriptFileName))
if err != nil {
c.t.Fatalf("Failed to create envScript: %v", err)
}
defer f.Close()

t := template.Must(template.New("env").Parse(envTemplate))

if err = t.Execute(f, env); err != nil {
c.t.Fatalf("Failed to execute template: %v", err)
}
}

func (c *ManifestTestCase) mustInvokeFunc(envTemplate string, env interface{}) {
c.mustCreateEnv(envTemplate, env)
args := fmt.Sprintf("source %s ; source %s --source-only ; %s", c.envScriptPath, configureHelperScriptName, c.manifestFuncName)
cmd := exec.Command("bash", "-c", args)

bs, err := cmd.CombinedOutput()
if err != nil {
c.t.Logf("%s", bs)
c.t.Fatalf("Failed to run configure-helper.sh: %v", err)
}
c.t.Logf("%s", string(bs))
}

func (c *ManifestTestCase) mustLoadPodFromManifest() {
json, err := ioutil.ReadFile(c.manifestDestination)
if err != nil {
c.t.Fatalf("Failed to read manifest: %s, %v", c.manifestDestination, err)
}

if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &c.pod); err != nil {
c.t.Fatalf("Failed to decode manifest: %v", err)
}
}

func (c *ManifestTestCase) tearDown() {
os.RemoveAll(c.kubeHome)
}

func copyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
cerr := out.Close()
if err == nil {
err = cerr
}
}()
_, err = io.Copy(out, in)
return err
}

8
vendor/k8s.io/kubernetes/cluster/gce/gci/flexvolume_node_setup.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
@ -81,10 +81,10 @@ flex_clean() {
umount_silent ${MOUNTER_PATH}
rm -rf ${MOUNTER_PATH}

if [ -n ${IMAGE_URL:-} ]; then
if [[ -n ${IMAGE_URL:-} ]]; then
docker rmi -f ${IMAGE_URL} &> /dev/null || /bin/true
fi
if [ -n ${MOUNTER_DEFAULT_NAME:-} ]; then
if [[ -n ${MOUNTER_DEFAULT_NAME:-} ]]; then
docker rm -f ${MOUNTER_DEFAULT_NAME} &> /dev/null || /bin/true
fi
}
@ -119,7 +119,7 @@ generate_chroot_wrapper() {

mkdir -p $wrapper_dir
cat >$wrapper_path <<EOF
#!/bin/bash
#!/usr/bin/env bash
chroot ${MOUNTER_PATH} ${driver_path} "\$@"
EOF

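# For illustration, each generated wrapper is a two-line script that re-enters
# the mounter chroot (paths below are assumed, not taken from this diff):
#
#   #!/usr/bin/env bash
#   chroot /home/kubernetes/flexvolume/mounter /flexvolume/driver "$@"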
51
vendor/k8s.io/kubernetes/cluster/gce/gci/health-monitor.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
@ -24,11 +24,39 @@ set -o pipefail

# We simply kill the process when there is a failure. Another systemd service will
# automatically restart the process.
function docker_monitoring {
while [ 1 ]; do
if ! timeout 60 docker ps > /dev/null; then
echo "Docker daemon failed!"
pkill docker
function container_runtime_monitoring {
local -r max_attempts=5
local attempt=1
local -r crictl="${KUBE_HOME}/bin/crictl"
local -r container_runtime_name="${CONTAINER_RUNTIME_NAME:-docker}"
# We still need to use `docker ps` when container runtime is "docker". This is because
# dockershim is still part of kubelet today. When kubelet is down, crictl pods
# will also fail, and docker will be killed. This is undesirable especially when
# docker live restore is disabled.
local healthcheck_command="docker ps"
if [[ "${CONTAINER_RUNTIME:-docker}" != "docker" ]]; then
healthcheck_command="${crictl} pods"
fi
# Container runtime startup takes time. Make initial attempts before starting
# killing the container runtime.
until timeout 60 ${healthcheck_command} > /dev/null; do
if (( attempt == max_attempts )); then
echo "Max attempt ${max_attempts} reached! Proceeding to monitor container runtime healthiness."
break
fi
echo "$attempt initial attempt \"${healthcheck_command}\"! Trying again in $attempt seconds..."
sleep "$(( 2 ** attempt++ ))"
done
while true; do
if ! timeout 60 ${healthcheck_command} > /dev/null; then
echo "Container runtime ${container_runtime_name} failed!"
if [[ "$container_runtime_name" == "docker" ]]; then
# Dump stack of docker daemon for investigation.
# Log file name looks like goroutine-stacks-TIMESTAMP and will be saved to
# the exec root directory, which is /var/run/docker/ on Ubuntu and COS.
pkill -SIGUSR1 dockerd
fi
systemctl kill --kill-who=main "${container_runtime_name}"
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 120
else
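# For illustration: sleep "$(( 2 ** attempt++ ))" gives exponential backoff.
# With max_attempts=5 and attempt starting at 1, the initial probes sleep
# 2, 4, 8, then 16 seconds before the loop gives up and steady-state
# monitoring begins.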
@ -48,7 +76,7 @@ function kubelet_monitoring {
|
||||
# Print the response and/or errors.
|
||||
echo $output
|
||||
echo "Kubelet is unhealthy!"
|
||||
pkill kubelet
|
||||
systemctl kill kubelet
|
||||
# Wait for a while, as we don't want to kill it again before it is really up.
|
||||
sleep 60
|
||||
else
|
||||
@ -60,11 +88,12 @@ function kubelet_monitoring {
|
||||
|
||||
############## Main Function ################
|
||||
if [[ "$#" -ne 1 ]]; then
|
||||
echo "Usage: health-monitor.sh <docker/kubelet>"
|
||||
echo "Usage: health-monitor.sh <container-runtime/kubelet>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
KUBE_ENV="/home/kubernetes/kube-env"
|
||||
KUBE_HOME="/home/kubernetes"
|
||||
KUBE_ENV="${KUBE_HOME}/kube-env"
|
||||
if [[ ! -e "${KUBE_ENV}" ]]; then
|
||||
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
|
||||
exit 1
|
||||
@ -74,8 +103,8 @@ SLEEP_SECONDS=10
|
||||
component=$1
|
||||
echo "Start kubernetes health monitoring for ${component}"
|
||||
source "${KUBE_ENV}"
|
||||
if [[ "${component}" == "docker" ]]; then
|
||||
docker_monitoring
|
||||
if [[ "${component}" == "container-runtime" ]]; then
|
||||
container_runtime_monitoring
|
||||
elif [[ "${component}" == "kubelet" ]]; then
|
||||
kubelet_monitoring
|
||||
else
|
||||
|
2
vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh
generated
vendored
2
vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
|
5
vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh
generated
vendored
5
vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
@ -68,6 +68,7 @@ function replicate-master-instance() {
|
||||
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-ensure-gke-docker > "${KUBE_TEMP}/gci-ensure-gke-docker.txt"
|
||||
get-metadata "${existing_master_zone}" "${existing_master_name}" gci-docker-version > "${KUBE_TEMP}/gci-docker-version.txt"
|
||||
get-metadata "${existing_master_zone}" "${existing_master_name}" kube-master-certs > "${KUBE_TEMP}/kube-master-certs.yaml"
|
||||
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-location > "${KUBE_TEMP}/cluster-location.txt"
|
||||
|
||||
create-master-instance-internal "${REPLICA_NAME}"
|
||||
}
|
||||
@ -106,6 +107,7 @@ function create-master-instance-internal() {
|
||||
"${address:-}" "${enable_ip_aliases:-}" "${IP_ALIAS_SIZE:-}")
|
||||
|
||||
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
|
||||
metadata="${metadata},kubelet-config=${KUBE_TEMP}/master-kubelet-config.yaml"
|
||||
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml"
|
||||
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh"
|
||||
metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
|
||||
@ -114,6 +116,7 @@ function create-master-instance-internal() {
|
||||
metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt"
|
||||
metadata="${metadata},gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
|
||||
metadata="${metadata},kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml"
|
||||
metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
|
||||
metadata="${metadata},${MASTER_EXTRA_METADATA}"
|
||||
|
||||
local disk="name=${master_name}-pd"
|
||||
|
8
vendor/k8s.io/kubernetes/cluster/gce/gci/master.yaml
generated
vendored
8
vendor/k8s.io/kubernetes/cluster/gce/gci/master.yaml
generated
vendored
@ -40,12 +40,12 @@ write_files:
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-docker-monitor.service
|
||||
- path: /etc/systemd/system/kube-container-runtime-monitor.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes health monitoring for docker
|
||||
Description=Kubernetes health monitoring for container runtime
|
||||
After=kube-master-configuration.service
|
||||
|
||||
[Service]
|
||||
@ -54,7 +54,7 @@ write_files:
|
||||
RemainAfterExit=yes
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
|
||||
ExecStart=/home/kubernetes/bin/health-monitor.sh docker
|
||||
ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
@ -120,7 +120,7 @@ runcmd:
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable kube-master-installation.service
|
||||
- systemctl enable kube-master-configuration.service
|
||||
- systemctl enable kube-docker-monitor.service
|
||||
- systemctl enable kube-container-runtime-monitor.service
|
||||
- systemctl enable kubelet-monitor.service
|
||||
- systemctl enable kube-logrotate.timer
|
||||
- systemctl enable kube-logrotate.service
|
||||
|
21
vendor/k8s.io/kubernetes/cluster/gce/gci/mounter/stage-upload.sh
generated
vendored
21
vendor/k8s.io/kubernetes/cluster/gce/gci/mounter/stage-upload.sh
generated
vendored
@ -23,12 +23,10 @@ set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
RKT_VERSION="v1.18.0"
|
||||
DOCKER2ACI_VERSION="v0.13.0"
|
||||
MOUNTER_VERSION=$1
|
||||
DOCKER_IMAGE=docker://$2
|
||||
MOUNTER_ACI_IMAGE=gci-mounter-${MOUNTER_VERSION}.aci
|
||||
RKT_GCS_DIR=gs://kubernetes-release/rkt/
|
||||
MOUNTER_GCS_DIR=gs://kubernetes-release/gci-mounter/
|
||||
|
||||
TMPDIR=/tmp
|
||||
@ -37,7 +35,6 @@ DOWNLOAD_DIR=$(mktemp --tmpdir=${TMPDIR} -d gci-mounter-build.XXXXXXXXXX)
|
||||
|
||||
# Setup a staging directory
|
||||
STAGING_DIR=$(mktemp --tmpdir=${TMPDIR} -d gci-mounter-staging.XXXXXXXXXX)
|
||||
RKT_DIR=${STAGING_DIR}/${RKT_VERSION}
|
||||
ACI_DIR=${STAGING_DIR}/gci-mounter
|
||||
CWD=${PWD}
|
||||
|
||||
@ -51,20 +48,8 @@ function cleanup {
|
||||
# Delete temporary directories on exit
|
||||
trap cleanup EXIT
|
||||
|
||||
mkdir ${RKT_DIR}
|
||||
mkdir ${ACI_DIR}
|
||||
|
||||
# Download rkt
|
||||
cd ${DOWNLOAD_DIR}
|
||||
echo "Downloading rkt ${RKT_VERSION}"
|
||||
wget "https://github.com/coreos/rkt/releases/download/${RKT_VERSION}/rkt-${RKT_VERSION}.tar.gz" &> /dev/null
|
||||
echo "Extracting rkt ${RKT_VERSION}"
|
||||
tar xzf rkt-${RKT_VERSION}.tar.gz
|
||||
|
||||
# Stage rkt into working directory
|
||||
cp rkt-${RKT_VERSION}/rkt ${RKT_DIR}/rkt
|
||||
cp rkt-${RKT_VERSION}/stage1-fly.aci ${RKT_DIR}/
|
||||
|
||||
# Convert docker image to aci and stage it
|
||||
echo "Downloading docker2aci ${DOCKER2ACI_VERSION}"
|
||||
wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" &> /dev/null
|
||||
@ -74,13 +59,9 @@ ACI_IMAGE=$(${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci ${DOCKER
|
||||
cp ${ACI_IMAGE} ${ACI_DIR}/${MOUNTER_ACI_IMAGE}
|
||||
|
||||
# Upload the contents to gcs
|
||||
echo "Uploading rkt artifacts in ${RKT_DIR} to ${RKT_GCS_DIR}"
|
||||
gsutil cp -R ${RKT_DIR} ${RKT_GCS_DIR}
|
||||
echo "Uploading gci mounter ACI in ${ACI_DIR} to ${MOUNTER_GCS_DIR}"
|
||||
gsutil cp ${ACI_DIR}/${MOUNTER_ACI_IMAGE} ${MOUNTER_GCS_DIR}
|
||||
|
||||
echo "Upload completed"
|
||||
echo "Update rkt, stag1-fly.aci & gci-mounter ACI versions and SHA1 in cluster/gce/gci/configure.sh"
|
||||
echo "${RKT_VERSION}/rkt sha1: $(sha1sum ${RKT_DIR}/rkt)"
|
||||
echo "${RKT_VERSION}/stage1-fly.aci sha1: $(sha1sum ${RKT_DIR}/stage1-fly.aci)"
|
||||
echo "Updated gci-mounter ACI version and SHA1 in cluster/gce/gci/configure.sh"
|
||||
echo "${MOUNTER_ACI_IMAGE} hash: $(sha1sum ${ACI_DIR}/${MOUNTER_ACI_IMAGE})"
|
||||
|
4
vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh
generated
vendored
4
vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
@ -20,6 +20,7 @@ source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
|
||||
function get-node-instance-metadata {
|
||||
local metadata=""
|
||||
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
|
||||
metadata+="kubelet-config=${KUBE_TEMP}/node-kubelet-config.yaml,"
|
||||
metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml,"
|
||||
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,"
|
||||
metadata+="cluster-location=${KUBE_TEMP}/cluster-location.txt,"
|
||||
@ -27,6 +28,7 @@ function get-node-instance-metadata {
|
||||
metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt,"
|
||||
metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,"
|
||||
metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt,"
|
||||
metadata+="shutdown-script=${KUBE_ROOT}/cluster/gce/gci/shutdown.sh,"
|
||||
metadata+="${NODE_EXTRA_METADATA}"
|
||||
echo "${metadata}"
|
||||
}
|
||||
|
8
vendor/k8s.io/kubernetes/cluster/gce/gci/node.yaml
generated
vendored
8
vendor/k8s.io/kubernetes/cluster/gce/gci/node.yaml
generated
vendored
@ -40,12 +40,12 @@ write_files:
[Install]
WantedBy=kubernetes.target

- path: /etc/systemd/system/kube-docker-monitor.service
- path: /etc/systemd/system/kube-container-runtime-monitor.service
permissions: 0644
owner: root
content: |
[Unit]
Description=Kubernetes health monitoring for docker
Description=Kubernetes health monitoring for container runtime
After=kube-node-configuration.service

[Service]
@ -54,7 +54,7 @@ write_files:
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
ExecStart=/home/kubernetes/bin/health-monitor.sh docker
ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime

[Install]
WantedBy=kubernetes.target
@ -120,7 +120,7 @@ runcmd:
- systemctl daemon-reload
- systemctl enable kube-node-installation.service
- systemctl enable kube-node-configuration.service
- systemctl enable kube-docker-monitor.service
- systemctl enable kube-container-runtime-monitor.service
- systemctl enable kubelet-monitor.service
- systemctl enable kube-logrotate.timer
- systemctl enable kube-logrotate.service
23
vendor/k8s.io/kubernetes/cluster/gce/gci/shutdown.sh
generated
vendored
Executable file
@ -0,0 +1,23 @@
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A script that lets gci preemptible nodes gracefully terminate in the event of a VM shutdown.
preemptible=$(curl "http://metadata.google.internal/computeMetadata/v1/instance/scheduling/preemptible" -H "Metadata-Flavor: Google")
if [ ${preemptible} == "TRUE" ]; then
echo "Shutting down! Sleeping for a minute to let the node gracefully terminate"
# https://cloud.google.com/compute/docs/instances/stopping-or-deleting-an-instance#delete_timeout
sleep 30
fi
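For this shutdown hook, the GCE metadata server returns the literal string TRUE or FALSE, which is why the comparison above is against "TRUE". A slightly more defensive sketch of the same check (an illustration, not the vendored script): quote the expansion and fail closed if the metadata call errors:

preemptible=$(curl -sf -H "Metadata-Flavor: Google" \
  "http://metadata.google.internal/computeMetadata/v1/instance/scheduling/preemptible" || echo "FALSE")
if [[ "${preemptible}" == "TRUE" ]]; then
  sleep 30  # give the node time to terminate gracefully before the VM is reclaimed
fi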
37
vendor/k8s.io/kubernetes/cluster/gce/list-resources.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
@ -38,17 +38,18 @@ if [[ "${KUBERNETES_PROVIDER:-}" == "gke" ]]; then
INSTANCE_PREFIX="${INSTANCE_PREFIX:0:26}"
fi

# Usage: gcloud-compute-list <resource> <additional parameters to gcloud...>
# Usage: gcloud-list <group> <resource> <additional parameters to gcloud...>
# GREP_REGEX is applied to the output of gcloud if set
GREP_REGEX=""
function gcloud-compute-list() {
local -r resource=$1
local -r filter=${2:-}
echo -e "\n\n[ ${resource} ]"
function gcloud-list() {
local -r group=$1
local -r resource=$2
local -r filter=${3:-}
echo -e "\n\n[ ${group} ${resource} ]"
local attempt=1
local result=""
while true; do
if result=$(gcloud compute ${resource} list --project=${PROJECT} ${filter:+--filter="$filter"} ${@:3}); then
if result=$(gcloud ${group} ${resource} list --project=${PROJECT} ${filter:+--filter="$filter"} ${@:4}); then
if [[ ! -z "${GREP_REGEX}" ]]; then
result=$(echo "${result}" | grep "${GREP_REGEX}" || true)
fi
@ -57,7 +58,7 @@ function gcloud-compute-list() {
fi
echo -e "Attempt ${attempt} failed to list ${resource}. Retrying." >&2
attempt=$(($attempt+1))
if [[ ${attempt} > 5 ]]; then
if [[ ${attempt} -gt 5 ]]; then
echo -e "List ${resource} failed!" >&2
exit 2
fi
@ -74,21 +75,23 @@ echo "Provider: ${KUBERNETES_PROVIDER:-}"

# List resources related to instances, filtering by the instance prefix if
# provided.
gcloud-compute-list instance-templates "name ~ '${INSTANCE_PREFIX}.*'"
gcloud-compute-list instance-groups "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-compute-list instances "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-list compute instance-templates "name ~ '${INSTANCE_PREFIX}.*'"
gcloud-list compute instance-groups "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-list compute instances "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"

# List disk resources, filtering by instance prefix if provided.
gcloud-compute-list disks "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-list compute disks "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"

# List network resources. We include names starting with "a", corresponding to
# those that Kubernetes creates.
gcloud-compute-list addresses "${REGION:+"region=(${REGION}) AND "}name ~ 'a.*|${INSTANCE_PREFIX}.*'"
gcloud-list compute addresses "${REGION:+"region=(${REGION}) AND "}name ~ 'a.*|${INSTANCE_PREFIX}.*'"
# Match either the header or a line with the specified e2e network.
# This assumes that the network name is the second field in the output.
GREP_REGEX="^NAME\|^[^ ]\+[ ]\+\(default\|${NETWORK}\) "
gcloud-compute-list routes "name ~ 'default.*|${INSTANCE_PREFIX}.*'"
gcloud-compute-list firewall-rules "name ~ 'default.*|k8s-fw.*|${INSTANCE_PREFIX}.*'"
gcloud-list compute routes "name ~ 'default.*|${INSTANCE_PREFIX}.*'"
gcloud-list compute firewall-rules "name ~ 'default.*|k8s-fw.*|${INSTANCE_PREFIX}.*'"
GREP_REGEX=""
gcloud-compute-list forwarding-rules ${REGION:+"region=(${REGION})"}
gcloud-compute-list target-pools ${REGION:+"region=(${REGION})"}
gcloud-list compute forwarding-rules ${REGION:+"region=(${REGION})"}
gcloud-list compute target-pools ${REGION:+"region=(${REGION})"}

gcloud-list logging sinks
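Passing the gcloud command group explicitly is what lets the one retry wrapper cover non-compute groups such as the logging call above. Extending the same pattern to another group is a one-liner; the dns call below is an illustration, not something list-resources.sh actually invokes:

GREP_REGEX=""
gcloud-list compute routers ${REGION:+"region=(${REGION})"}
gcloud-list dns managed-zones  # hypothetical extra group, same retry wrapper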
44
vendor/k8s.io/kubernetes/cluster/gce/manifests/BUILD
generated
vendored
Normal file
@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])

load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

pkg_tar(
name = "gce-master-manifests",
srcs = [":manifests"],
mode = "0644",
)

# if you update this, also update function kube::release::package_kube_manifests_tarball() in build/lib/release.sh
filegroup(
name = "manifests",
srcs = [
"abac-authz-policy.jsonl",
"cluster-autoscaler.manifest",
"e2e-image-puller.manifest",
"etcd.manifest",
"etcd-empty-dir-cleanup.yaml",
"glbc.manifest",
"kms-plugin-container.manifest",
"kube-addon-manager.yaml",
"kube-apiserver.manifest",
"kube-controller-manager.manifest",
"kube-proxy.manifest",
"kube-scheduler.manifest",
"rescheduler.manifest",
] + glob(["internal-*"]),
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
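To check what the new :manifests filegroup actually packages into gce-master-manifests, a bazel query along these lines works (a sketch, run from the kubernetes repo root; kind() filtering on "source file" is standard bazel query syntax):

bazel query 'kind("source file", deps(//cluster/gce/manifests:manifests))'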
18
vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest
generated
vendored
@ -7,6 +7,9 @@
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
},
"annotations": {
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
}
},
"spec": {
@ -14,7 +17,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.1.1",
"image": "k8s.gcr.io/cluster-autoscaler:v1.3.0",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
@ -25,7 +28,7 @@
},
"command": [
"./run.sh",
"--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
"--kubernetes=https://127.0.0.1:443",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
@ -56,6 +59,11 @@
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "srvkube",
"readOnly": true,
"mountPath": "/etc/srv/kubernetes/cluster-autoscaler"
},
{
"name": "logfile",
"mountPath": "/var/log/cluster-autoscaler.log",
@ -80,6 +88,12 @@
"path": "/usr/share/ca-certificates"
}
},
{
"name": "srvkube",
"hostPath": {
"path": "/etc/srv/kubernetes/cluster-autoscaler"
}
},
{
"name": "logfile",
"hostPath": {
32
vendor/k8s.io/kubernetes/cluster/gce/manifests/e2e-image-puller.manifest
generated
vendored
@ -34,7 +34,7 @@ spec:
k8s.gcr.io/busybox:1.24
k8s.gcr.io/dnsutils:e2e
k8s.gcr.io/e2e-net-amd64:1.0
k8s.gcr.io/echoserver:1.6
k8s.gcr.io/echoserver:1.10
k8s.gcr.io/eptest:0.1
k8s.gcr.io/fakegitserver:0.1
k8s.gcr.io/galera-install:0.1
@ -69,21 +69,23 @@ spec:
k8s.gcr.io/test-webserver:e2e
k8s.gcr.io/update-demo:kitten
k8s.gcr.io/update-demo:nautilus
k8s.gcr.io/volume-ceph:0.1
k8s.gcr.io/volume-gluster:0.2
k8s.gcr.io/volume-iscsi:0.1
k8s.gcr.io/volume-nfs:0.8
k8s.gcr.io/volume-rbd:0.1
gcr.io/kubernetes-e2e-test-images/volume-ceph:0.1
gcr.io/kubernetes-e2e-test-images/volume-gluster:0.2
gcr.io/kubernetes-e2e-test-images/volume-iscsi:0.1
gcr.io/kubernetes-e2e-test-images/volume-nfs:0.8
gcr.io/kubernetes-e2e-test-images/volume-rbd:0.1
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0;
; do echo $(date '+%X') pulling $i; crictl pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run/docker.sock
- mountPath: {{ container_runtime_endpoint }}
name: socket
- mountPath: /usr/bin/docker
name: docker
- mountPath: /usr/bin/crictl
name: crictl
- mountPath: /etc/crictl.yaml
name: config
# Add a container that runs a health-check
- name: nethealth-check
resources:
@ -98,13 +100,17 @@ spec:
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: /var/run/docker.sock
path: {{ container_runtime_endpoint }}
type: Socket
name: socket
- hostPath:
path: /usr/bin/docker
path: /home/kubernetes/bin/crictl
type: File
name: docker
name: crictl
- hostPath:
path: /etc/crictl.yaml
type: File
name: config
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
17
vendor/k8s.io/kubernetes/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml
generated
vendored
Normal file
@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-empty-dir-cleanup
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: etcd-empty-dir-cleanup
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.18.0
23
vendor/k8s.io/kubernetes/cluster/gce/manifests/etcd.manifest
generated
vendored
@ -5,7 +5,8 @@
"name":"etcd-server{{ suffix }}",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
}
},
"spec":{
@ -13,7 +14,7 @@
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.14') }}",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.18-0') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
@ -22,20 +23,32 @@
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ host_ip }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} {{ etcd_extra_args }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.2.14') }}"
"value": "{{ pillar.get('etcd_version', '3.2.18') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
},
{ "name": "INITIAL_CLUSTER",
"value": "{{ etcd_cluster }}"
},
{ "name": "LISTEN_PEER_URLS",
"value": "{{ etcd_protocol }}://{{ host_ip }}:{{ server_port }}"
},
{ "name": "INITIAL_ADVERTISE_PEER_URLS",
"value": "{{ etcd_protocol }}://{{ hostname }}:{{ server_port }}"
},
{ "name": "ETCD_CREDS",
"value": "{{ etcd_creds }}"
},
{ "name": "ETCD_SNAPSHOT_COUNT",
"value": "10000"
}
],
"livenessProbe": {
@ -50,7 +63,7 @@
"ports": [
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
},
{ "name": "clientport",
"containerPort": {{ port }},
9
vendor/k8s.io/kubernetes/cluster/gce/manifests/glbc.manifest
generated
vendored
@ -1,19 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.8-alpha.2
name: l7-lb-controller-v1.1.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: gcp-lb-controller
version: v0.9.8-alpha.2
version: v1.1.1
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.2
- image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.1.1
livenessProbe:
httpGet:
path: /healthz
@ -44,7 +45,7 @@ spec:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /glbc --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
- 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf
8
vendor/k8s.io/kubernetes/cluster/gce/manifests/kms-plugin-container.manifest
generated
vendored
Normal file
@ -0,0 +1,8 @@
{
"name": "kms-plugin",
"image": "gcr.io/google-containers/k8s-cloud-kms-plugin:v0.1.1",
"command": ["/k8s-cloud-kms-plugin", "--key-uri={{kms_key_uri}}", "--path-to-unix-socket={{kms_path_to_socket}}", "--gce-config={{gce_conf_path}}", "--logtostderr", "2>\&1"],
"livenessProbe": { "httpGet": {"host": "127.0.0.1", "port": 8081, "path": "/healthz"}, "initialDelaySeconds": 3, "timeoutSeconds": 3},
"ports":[{ "name": "healthz", "containerPort": 8081, "hostPort": 8081}, { "name": "metrics", "containerPort": 8082, "hostPort": 8082}],
"volumeMounts": [{{cloud_config_mount}}, {{kms_socket_mount}}]
}
1
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-addon-manager.yaml
generated
vendored
@ -5,6 +5,7 @@ metadata:
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
component: kube-addon-manager
spec:
8
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-apiserver.manifest
generated
vendored
@ -5,7 +5,8 @@
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
@ -15,6 +16,7 @@
"spec":{
"hostNetwork": true,
"containers":[
{{kms_plugin_container}}
{
"name": "kube-apiserver",
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
@ -47,6 +49,8 @@
"hostPort": 8080}
],
"volumeMounts": [
{{kms_socket_mount}}
{{encryption_provider_mount}}
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{webhook_config_mount}}
@ -86,6 +90,8 @@
}
],
"volumes":[
{{kms_socket_volume}}
{{encryption_provider_volume}}
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{webhook_config_volume}}
3
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-controller-manager.manifest
generated
vendored
@ -5,7 +5,8 @@
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
2
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-proxy.manifest
generated
vendored
@ -14,7 +14,7 @@ metadata:
tier: node
component: kube-proxy
spec:
{{pod_priority}}
priorityClassName: system-node-critical
hostNetwork: true
tolerations:
- operator: "Exists"
3
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-scheduler.manifest
generated
vendored
@ -5,7 +5,8 @@
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
"scheduler.alpha.kubernetes.io/critical-pod": "",
"seccomp.security.alpha.kubernetes.io/pod": "docker/default"
},
"labels": {
"tier": "control-plane",
6
vendor/k8s.io/kubernetes/cluster/gce/manifests/rescheduler.manifest
generated
vendored
@ -1,19 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.3.1
name: rescheduler-v0.4.0
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.3.1
version: v0.4.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.3.1
- image: k8s.gcr.io/rescheduler:v0.4.0
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
8
vendor/k8s.io/kubernetes/cluster/gce/upgrade-aliases.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
@ -53,7 +53,7 @@ function detect-k8s-subnetwork() {
local subnetwork_url=$(gcloud compute instances describe \
${KUBE_MASTER} --project=${PROJECT} --zone=${ZONE} \
--format='value(networkInterfaces[0].subnetwork)')
if [ -n ${subnetwork_url} ]; then
if [[ -n ${subnetwork_url} ]]; then
IP_ALIAS_SUBNETWORK=$(echo ${subnetwork_url##*/})
fi
}
@ -161,8 +161,8 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.2.14
export ETCD_VERSION=3.2.14
export ETCD_IMAGE=3.2.18-0
export ETCD_VERSION=3.2.18

# Upgrade master with updated kube envs
${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l
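The change from [ -n ${subnetwork_url} ] to [[ -n ${subnetwork_url} ]] above matters because with single brackets an unquoted empty expansion collapses the test to [ -n ], which is always true (a one-argument test checks whether the string "-n" itself is non-empty). A quick sketch demonstrating the difference:

unset subnetwork_url
[ -n ${subnetwork_url} ] && echo "single brackets: wrongly true"
[[ -n ${subnetwork_url} ]] || echo "double brackets: correctly false"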
76
vendor/k8s.io/kubernetes/cluster/gce/upgrade.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
@ -291,18 +291,17 @@ function upgrade-node-env() {
# Note: This is called multiple times from do-node-upgrade() in parallel, so should be thread-safe.
function do-single-node-upgrade() {
local -r instance="$1"
instance_id=$(gcloud compute instances describe "${instance}" \
--format='get(id)' \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1) && describe_rc=$? || describe_rc=$?
if [[ "${describe_rc}" != 0 ]]; then
echo "== FAILED to describe ${instance} =="
echo "${instance_id}"
return ${describe_rc}
local kubectl_rc
local boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
if [[ "${kubectl_rc}" != 0 ]]; then
echo "== FAILED to get bootID ${instance} =="
echo "${boot_id}"
return ${kubectl_rc}
fi

# Drain node
echo "== Draining ${instance}. == " >&2
local drain_rc
"${KUBE_ROOT}/cluster/kubectl.sh" drain --delete-local-data --force --ignore-daemonsets "${instance}" \
&& drain_rc=$? || drain_rc=$?
if [[ "${drain_rc}" != 0 ]]; then
@ -312,7 +311,8 @@ function do-single-node-upgrade() {

# Recreate instance
echo "== Recreating instance ${instance}. ==" >&2
recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
local recreate_rc
local recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
--project="${PROJECT}" \
--zone="${ZONE}" \
--instances="${instance}" 2>&1) && recreate_rc=$? || recreate_rc=$?
@ -322,55 +322,31 @@ function do-single-node-upgrade() {
return ${recreate_rc}
fi

# Wait for instance to be recreated
echo "== Waiting for instance ${instance} to be recreated. ==" >&2
while true; do
new_instance_id=$(gcloud compute instances describe "${instance}" \
--format='get(id)' \
--project="${PROJECT}" \
--zone="${ZONE}" 2>&1) && describe_rc=$? || describe_rc=$?
if [[ "${describe_rc}" != 0 ]]; then
echo "== FAILED to describe ${instance} =="
echo "${new_instance_id}"
echo " (Will retry.)"
elif [[ "${new_instance_id}" == "${instance_id}" ]]; then
echo -n .
else
echo "Instance ${instance} recreated."
break
fi
sleep 1
done

# Wait for k8s node object to reflect new instance id
# Wait for node status to reflect a new boot ID. This guarantees us
# that the node status in the API is from a different boot. This
# does not guarantee that the status is from the upgraded node, but
# it is a best effort approximation.
echo "== Waiting for new node to be added to k8s. ==" >&2
while true; do
external_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.spec.externalID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
local new_boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
if [[ "${kubectl_rc}" != 0 ]]; then
echo "== FAILED to get node ${instance} =="
echo "${external_id}"
echo "${boot_id}"
echo " (Will retry.)"
elif [[ "${external_id}" == "${new_instance_id}" ]]; then
elif [[ "${boot_id}" != "${new_boot_id}" ]]; then
echo "Node ${instance} recreated."
break
elif [[ "${external_id}" == "${instance_id}" ]]; then
echo -n .
else
echo "Unexpected external_id '${external_id}' matches neither old ('${instance_id}') nor new ('${new_instance_id}')."
echo " (Will retry.)"
echo -n .
fi
sleep 1
done

# Wait for the node to not have SchedulingDisabled=True and also to have
# Ready=True.
# Wait for the node to have Ready=True.
echo "== Waiting for ${instance} to become ready. ==" >&2
while true; do
cordoned=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "SchedulingDisabled")].status}')
ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
if [[ "${cordoned}" == 'True' ]]; then
echo "Node ${instance} is still not ready: SchedulingDisabled=${ready}"
elif [[ "${ready}" != 'True' ]]; then
local ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
if [[ "${ready}" != 'True' ]]; then
echo "Node ${instance} is still not ready: Ready=${ready}"
else
echo "Node ${instance} Ready=${ready}"
@ -378,6 +354,16 @@ function do-single-node-upgrade() {
fi
sleep 1
done

# Uncordon the node.
echo "== Uncordon ${instance}. == " >&2
local uncordon_rc
"${KUBE_ROOT}/cluster/kubectl.sh" uncordon "${instance}" \
&& uncordon_rc=$? || uncordon_rc=$?
if [[ "${uncordon_rc}" != 0 ]]; then
echo "== FAILED to uncordon ${instance} =="
return ${uncordon_rc}
fi
}

# Prereqs:
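The rewritten do-single-node-upgrade keys recreation detection off .status.nodeInfo.bootID rather than the instance id and .spec.externalID pair: a recreated VM necessarily reports a new boot ID, so a change in that field is the signal that the node came back on a fresh boot. The underlying query, runnable against any cluster (plain kubectl standing in for cluster/kubectl.sh):

kubectl get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}'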
438
vendor/k8s.io/kubernetes/cluster/gce/util.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
@ -25,7 +25,7 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
else
echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2
@ -502,6 +502,7 @@ function write-master-env {

construct-kubelet-flags true
build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
build-kubelet-config true "${KUBE_TEMP}/master-kubelet-config.yaml"
build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
}

@ -512,79 +513,11 @@ function write-node-env {

construct-kubelet-flags false
build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
build-kubelet-config false "${KUBE_TEMP}/node-kubelet-config.yaml"
}

# $1: if 'true', we're rendering flags for a master, else a node
function construct-kubelet-flags {
function build-node-labels {
local master=$1
local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
flags+=" --allow-privileged=true"
flags+=" --cgroup-root=/"
flags+=" --cloud-provider=gce"
flags+=" --cluster-dns=${DNS_SERVER_IP}"
flags+=" --cluster-domain=${DNS_DOMAIN}"
flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
# Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
flags+=" --experimental-check-node-capabilities-before-mount=true"
# Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved)
flags+=" --cert-dir=/var/lib/kubelet/pki/"

if [[ "${master}" == "true" ]]; then
flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
flags+=" --enable-debugging-handlers=false"
flags+=" --hairpin-mode=none"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
#TODO(mikedanese): allow static pods to start before creating a client
#flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
#flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --register-schedulable=false"
else
# Note: Standalone mode is used by GKE
flags+=" --pod-cidr=${MASTER_IP_RANGE}"
fi
else # For nodes
flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
flags+=" --enable-debugging-handlers=true"
flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
[[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
[[ "${HAIRPIN_MODE:-}" == "none" ]]; then
flags+=" --hairpin-mode=${HAIRPIN_MODE}"
fi
# Keep client-ca-file in sync with CA_CERT_BUNDLE_PATH in configure-helper.sh
flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=/etc/srv/kubernetes/pki/ca-certificates.crt"
fi
# Network plugin
if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
flags+=" --cni-bin-dir=/home/kubernetes/bin"
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
# Calico uses CNI always.
# Note that network policy won't work for master node.
if [[ "${master}" == "true" ]]; then
flags+=" --network-plugin=${NETWORK_PROVIDER}"
else
flags+=" --network-plugin=cni"
fi
else
# Otherwise use the configured value.
flags+=" --network-plugin=${NETWORK_PROVIDER}"
fi
fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
fi
flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
# Note: ENABLE_MANIFEST_URL is used by GKE
if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
flags+=" --manifest-url=${MANIFEST_URL}"
flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
fi
if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
fi
local node_labels=""
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then
# Add kube-proxy daemonset label to node to avoid situation during cluster
@ -597,18 +530,143 @@ function construct-kubelet-flags {
if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then
node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
fi
echo $node_labels
}

# yaml-map-string-stringarray converts the encoded structure to yaml format, and echoes the result
# under the provided name. If the encoded structure is empty, echoes nothing.
# 1: name to be output in yaml
# 2: encoded map-string-string (which may contain duplicate keys - resulting in map-string-stringarray)
# 3: key-value separator (defaults to ':')
# 4: item separator (defaults to ',')
function yaml-map-string-stringarray {
declare -r name="${1}"
declare -r encoded="${2}"
declare -r kv_sep="${3:-:}"
declare -r item_sep="${4:-,}"

declare -a pairs # indexed array
declare -A map # associative array
IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep
for pair in "${pairs[@]}"; do
declare key
declare value
IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep
map[$key]="${map[$key]+${map[$key]}${item_sep}}${value}" # append values from duplicate keys
done
# only output if there is a non-empty map
if [[ ${#map[@]} -gt 0 ]]; then
echo "${name}:"
for k in "${!map[@]}"; do
echo " ${k}:"
declare -a values
IFS="${item_sep}" read -ra values <<<"${map[$k]}"
for val in "${values[@]}"; do
# declare across two lines so errexit can catch failures
declare v
v=$(yaml-quote "${val}")
echo " - ${v}"
done
done
fi
}

# yaml-map-string-string converts the encoded structure to yaml format, and echoes the result
# under the provided name. If the encoded structure is empty, echoes nothing.
# 1: name to be output in yaml
# 2: encoded map-string-string (no duplicate keys)
# 3: bool, whether to yaml-quote the value string in the output (defaults to true)
# 4: key-value separator (defaults to ':')
# 5: item separator (defaults to ',')
function yaml-map-string-string {
declare -r name="${1}"
declare -r encoded="${2}"
declare -r quote_val_string="${3:-true}"
declare -r kv_sep="${4:-:}"
declare -r item_sep="${5:-,}"

declare -a pairs # indexed array
declare -A map # associative array
IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep # TODO(mtaufen): try quoting this too
for pair in "${pairs[@]}"; do
declare key
declare value
IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep
map[$key]="${value}" # add to associative array
done
# only output if there is a non-empty map
if [[ ${#map[@]} -gt 0 ]]; then
echo "${name}:"
for k in "${!map[@]}"; do
if [[ "${quote_val_string}" == "true" ]]; then
# declare across two lines so errexit can catch failures
declare v
v=$(yaml-quote "${map[$k]}")
echo " ${k}: ${v}"
else
echo " ${k}: ${map[$k]}"
fi
done
fi
}

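For reference, a hedged sketch of what these two helpers emit; the input strings below are made up, key order is whatever bash's associative array iteration produces, and spacing follows the echo statements as shown above:

$ yaml-map-string-string 'featureGates' 'DynamicKubeletConfig=true,PodPriority=true' false '='
featureGates:
 DynamicKubeletConfig: true
 PodPriority: true

$ yaml-map-string-stringarray 'staticPodURLHeader' 'Metadata-Flavor:Google'
staticPodURLHeader:
 Metadata-Flavor:
 - 'Google'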
# $1: if 'true', we're rendering flags for a master, else a node
function construct-kubelet-flags {
local master=$1
local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
flags+=" --allow-privileged=true"
flags+=" --cloud-provider=gce"
# Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
flags+=" --experimental-check-node-capabilities-before-mount=true"
# Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved)
flags+=" --cert-dir=/var/lib/kubelet/pki/"
# Configure the directory that the Kubelet should use to store dynamic config checkpoints
flags+=" --dynamic-config-dir=/var/lib/kubelet/dynamic-config"

if [[ "${master}" == "true" ]]; then
flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
#TODO(mikedanese): allow static pods to start before creating a client
#flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
#flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --register-schedulable=false"
fi
else # For nodes
flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
fi
# Network plugin
if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
flags+=" --cni-bin-dir=/home/kubernetes/bin"
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then
# Calico uses CNI always.
# Note that network policy won't work for master node.
if [[ "${master}" == "true" ]]; then
flags+=" --network-plugin=${NETWORK_PROVIDER}"
else
flags+=" --network-plugin=cni"
fi
else
# Otherwise use the configured value.
flags+=" --network-plugin=${NETWORK_PROVIDER}"

fi
fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
fi
flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
local node_labels=$(build-node-labels ${master})
if [[ -n "${node_labels:-}" ]]; then
flags+=" --node-labels=${node_labels}"
fi
if [[ -n "${NODE_TAINTS:-}" ]]; then
flags+=" --register-with-taints=${NODE_TAINTS}"
fi
if [[ -n "${EVICTION_HARD:-}" ]]; then
flags+=" --eviction-hard=${EVICTION_HARD}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
flags+=" --feature-gates=${FEATURE_GATES}"
fi
# TODO(mtaufen): ROTATE_CERTIFICATES seems unused; delete it?
if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then
flags+=" --rotate-certificates=true"
@ -616,14 +674,101 @@ function construct-kubelet-flags {
if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
flags+=" --container-runtime=${CONTAINER_RUNTIME}"
fi
# TODO(mtaufen): CONTAINER_RUNTIME_ENDPOINT seems unused; delete it?
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then
flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
fi
if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
flags+=" --max-pods=${MAX_PODS_PER_NODE}"
fi

KUBELET_ARGS="${flags}"
}

# $1: if 'true', we're rendering config for a master, else a node
function build-kubelet-config {
local master=$1
local file=$2

rm -f "${file}"
{
declare quoted_dns_server_ip
declare quoted_dns_domain
quoted_dns_server_ip=$(yaml-quote "${DNS_SERVER_IP}")
quoted_dns_domain=$(yaml-quote "${DNS_DOMAIN}")
cat <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupRoot: /
clusterDNS:
- ${quoted_dns_server_ip}
clusterDomain: ${quoted_dns_domain}
staticPodPath: /etc/kubernetes/manifests
readOnlyPort: 10255
EOF

# --- begin master-specific config ---
if [[ "${master}" == "true" ]]; then
cat <<EOF
enableDebuggingHandlers: false
hairpinMode: none
authentication:
webhook:
enabled: false
anonymous:
enabled: true
authorization:
mode: AlwaysAllow
EOF
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "false" ]]; then
# Note: Standalone mode is used by GKE
declare quoted_master_ip_range
quoted_master_ip_range=$(yaml-quote "${MASTER_IP_RANGE}")
cat <<EOF
podCidr: ${quoted_master_ip_range}
EOF
fi
# --- end master-specific config ---
else
# --- begin node-specific config ---
# Keep authentication.x509.clientCAFile in sync with CA_CERT_BUNDLE_PATH in configure-helper.sh
cat <<EOF
enableDebuggingHandlers: true
authentication:
x509:
clientCAFile: /etc/srv/kubernetes/pki/ca-certificates.crt
EOF
if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
[[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
[[ "${HAIRPIN_MODE:-}" == "none" ]]; then
declare quoted_hairpin_mode
quoted_hairpin_mode=$(yaml-quote "${HAIRPIN_MODE}")
cat <<EOF
hairpinMode: ${quoted_hairpin_mode}
EOF
fi
# --- end node-specific config ---
fi

# Note: ENABLE_MANIFEST_URL is used by GKE
if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
declare quoted_manifest_url
quoted_manifest_url=$(yaml-quote "${MANIFEST_URL}")
cat <<EOF
staticPodURL: ${quoted_manifest_url}
EOF
yaml-map-string-stringarray 'staticPodURLHeader' "${MANIFEST_URL_HEADER}"
fi

if [[ -n "${EVICTION_HARD:-}" ]]; then
yaml-map-string-string 'evictionHard' "${EVICTION_HARD}" true '<'
fi

if [[ -n "${FEATURE_GATES:-}" ]]; then
yaml-map-string-string 'featureGates' "${FEATURE_GATES}" false '='
fi
} > "${file}"
}

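Pieced together from the heredocs above, a node-side file produced by build-kubelet-config comes out roughly like this; the values are illustrative defaults rather than output from a real cluster, yaml-quote is what adds the single quotes, and the nesting is shown here as the kubelet expects it (the extraction above flattens the heredoc indentation):

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupRoot: /
clusterDNS:
- '10.0.0.10'
clusterDomain: 'cluster.local'
staticPodPath: /etc/kubernetes/manifests
readOnlyPort: 10255
enableDebuggingHandlers: true
authentication:
  x509:
    clientCAFile: /etc/srv/kubernetes/pki/ca-certificates.crt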
function build-kube-master-certs {
local file=$1
rm -f ${file}
@ -646,7 +791,7 @@ function build-kube-env {
local server_binary_tar_url=$SERVER_BINARY_TAR_URL
local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom") ]]; then
# TODO: Support fallback .tar.gz settings on Container Linux
server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
@ -670,9 +815,13 @@ SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_PROMETHEUS_MONITORING: $(yaml-quote ${ENABLE_PROMETHEUS_MONITORING:-false})
ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
ENABLE_METADATA_AGENT: $(yaml-quote ${ENABLE_METADATA_AGENT:-none})
METADATA_AGENT_VERSION: $(yaml-quote ${METADATA_AGENT_VERSION:-})
METADATA_AGENT_CPU_REQUEST: $(yaml-quote ${METADATA_AGENT_CPU_REQUEST:-})
METADATA_AGENT_MEMORY_REQUEST: $(yaml-quote ${METADATA_AGENT_MEMORY_REQUEST:-})
METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST: $(yaml-quote ${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-})
METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST: $(yaml-quote ${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-})
DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
@ -716,6 +865,15 @@ ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-f
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-})
ADVANCED_AUDIT_LOG_MODE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MODE:-})
ADVANCED_AUDIT_LOG_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-})
ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-})
ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT:-})
ADVANCED_AUDIT_LOG_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_LOG_THROTTLE_QPS:-})
ADVANCED_AUDIT_LOG_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_LOG_THROTTLE_BURST:-})
ADVANCED_AUDIT_LOG_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF:-})
ADVANCED_AUDIT_WEBHOOK_MODE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MODE:-})
ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-})
@ -724,6 +882,7 @@ ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THR
ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-})
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
GCE_GLBC_IMAGE: $(yaml-quote ${GCE_GLBC_IMAGE:-})
ENABLE_NODE_JOURNAL: $(yaml-quote ${ENABLE_NODE_JOURNAL:-false})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
@ -734,9 +893,22 @@ CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-})
NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-})
LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-})
ZONE: $(yaml-quote ${ZONE})
REGION: $(yaml-quote ${REGION})
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR})
KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS})
REQUIRE_METADATA_KUBELET_CONFIG_FILE: $(yaml-quote true)
ENABLE_NETD: $(yaml-quote ${ENABLE_NETD:-false})
CUSTOM_NETD_YAML: |
$(echo "${CUSTOM_NETD_YAML:-}" | sed -e "s/'/''/g")
EOF
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]] || \
[[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "cos" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
cat >>$file <<EOF
REMOUNT_VOLUME_PLUGIN_DIR: $(yaml-quote ${REMOUNT_VOLUME_PLUGIN_DIR:-true})
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT})
@ -747,8 +919,8 @@ EOF
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
EOF
fi
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu") ]] || \
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" = "ubuntu" || "${NODE_OS_DISTRIBUTION}" = "custom") ]] ; then
cat >>$file <<EOF
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
@ -818,6 +990,7 @@ ETCD_CA_KEY: $(yaml-quote ${ETCD_CA_KEY_BASE64:-})
ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
ENCRYPTION_PROVIDER_CONFIG: $(yaml-quote ${ENCRYPTION_PROVIDER_CONFIG:-})
EOF
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
cat >>$file <<EOF
@ -875,6 +1048,11 @@ EOF
if [ -n "${ETCD_QUOTA_BACKEND_BYTES:-}" ]; then
cat >>$file <<EOF
ETCD_QUOTA_BACKEND_BYTES: $(yaml-quote ${ETCD_QUOTA_BACKEND_BYTES})
EOF
fi
if [ -n "${ETCD_EXTRA_ARGS:-}" ]; then
cat >>$file <<EOF
ETCD_EXTRA_ARGS: $(yaml-quote ${ETCD_EXTRA_ARGS})
EOF
fi
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
@ -956,10 +1134,26 @@ ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
EOF
if [[ "${master}" == "false" ]]; then
# TODO(kubernetes/autoscaler#718): AUTOSCALER_ENV_VARS is a hotfix for cluster autoscaler,
# which reads the kube-env to determine the shape of a node and was broken by #60020.
# This should be removed as soon as a more reliable source of information is available!
local node_labels=$(build-node-labels false)
local node_taints="${NODE_TAINTS:-}"
local autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
cat >>$file <<EOF
AUTOSCALER_ENV_VARS: $(yaml-quote ${autoscaler_env_vars})
EOF
fi
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat >>$file <<EOF
SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
EOF
fi
if [ -n "${MAX_PODS_PER_NODE:-}" ]; then
cat >>$file <<EOF
MAX_PODS_PER_NODE: $(yaml-quote ${MAX_PODS_PER_NODE})
EOF
fi
}
@ -1259,6 +1453,9 @@ minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if "CL @" in rel:
print("Using dev version of gcloud: %s" %rel)
exit(0)
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
print("gcloud version out of date ( < %s )" % minVersion)
exit(1)
@ -1392,7 +1589,7 @@ function get-template-name-from-version() {
echo "${NODE_INSTANCE_PREFIX}-template-${1}" | cut -c 1-63 | sed 's/[\.\+]/-/g;s/-*$//g'
}

# validates the NODE_LOCAL_SSDS_EXT variable
function validate-node-local-ssds-ext(){
ssdopts="${1}"

@ -1468,7 +1675,7 @@ function create-node-template() {
done
done
fi

if [[ ! -z ${NODE_LOCAL_SSDS+x} ]]; then
# The NODE_LOCAL_SSDS check below fixes issue #49171
# Some versions of seq will count down from 1 if "seq 0" is specified
@ -1478,7 +1675,7 @@ function create-node-template() {
done
fi
fi

local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" \
@ -1602,14 +1799,8 @@ function check-existing() {
fi
}

# TODO(#54017): Remove below logics for handling deprecated network mode field.
# `x_gcloud_mode` was replaced by `x_gcloud_subnet_mode` in gcloud 175.0.0 and
# the content changed as well. Keeping such logic to make the transition easier.
function check-network-mode() {
local mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project ${NETWORK_PROJECT} --format='value(x_gcloud_subnet_mode)' || true)"
if [[ -z "${mode}" ]]; then
mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project ${NETWORK_PROJECT} --format='value(x_gcloud_mode)' || true)"
fi
# The deprecated field uses lower case. Convert to upper case for consistency.
echo "$(echo $mode | tr [a-z] [A-Z])"
}
@ -1623,7 +1814,7 @@ function create-network() {
|
||||
network_mode="custom"
|
||||
fi
|
||||
echo "Creating new ${network_mode} network: ${NETWORK}"
|
||||
gcloud compute networks create --project "${NETWORK_PROJECT}" "${NETWORK}" --mode="${network_mode}"
|
||||
gcloud compute networks create --project "${NETWORK_PROJECT}" "${NETWORK}" --subnet-mode="${network_mode}"
|
||||
else
|
||||
PREEXISTING_NETWORK=true
|
||||
PREEXISTING_NETWORK_MODE="$(check-network-mode)"
|
||||
@ -1658,8 +1849,8 @@ function create-network() {
|
||||
}
|
||||
|
||||
function expand-default-subnetwork() {
|
||||
gcloud compute networks switch-mode "${NETWORK}" \
|
||||
--mode custom \
|
||||
gcloud compute networks update "${NETWORK}" \
|
||||
--switch-to-custom-subnet-mode \
|
||||
--project "${NETWORK_PROJECT}" \
|
||||
--quiet || true
|
||||
gcloud compute networks subnets expand-ip-range "${NETWORK}" \
|
||||
@ -1695,12 +1886,6 @@ function create-subnetworks() {
    --region ${REGION} \
    ${IP_ALIAS_SUBNETWORK} 2>/dev/null)
  if [[ -z ${subnet} ]]; then
    # Only allow auto-creation for default subnets
    if [[ ${IP_ALIAS_SUBNETWORK} != ${INSTANCE_PREFIX}-subnet-default ]]; then
      echo "${color_red}Subnetwork ${NETWORK}:${IP_ALIAS_SUBNETWORK} does not exist${color_norm}"
      exit 1
    fi

    echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
    gcloud beta compute networks subnets create \
      ${IP_ALIAS_SUBNETWORK} \
@ -1713,7 +1898,7 @@ function create-subnetworks() {
      --secondary-range "services-default=${SERVICE_CLUSTER_IP_RANGE}"
    echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}"
  else
    if ! echo ${subnet} | grep --quiet secondaryIpRanges ${subnet}; then
    if ! echo ${subnet} | grep --quiet secondaryIpRanges; then
      echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}"
      exit 1
    fi
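The grep change fixes a subtle bug: with the trailing `${subnet}` argument, grep treats the expanded describe output as file operands and ignores its standard input entirely, so the secondary-range check never inspected the piped text. Stripped to essentials:

  echo "${subnet}" | grep --quiet secondaryIpRanges "${subnet}"   # buggy: grep reads "files", not stdin
  echo "${subnet}" | grep --quiet secondaryIpRanges               # fixed: grep reads the pipe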
@ -1787,35 +1972,38 @@ function delete-network() {
}

function delete-subnetworks() {
  if [[ ${ENABLE_IP_ALIASES:-} != "true" ]]; then
    # If running in custom mode network we need to delete subnets
    mode="$(check-network-mode)"
    if [[ "${mode}" == "CUSTOM" ]]; then
      if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then
        echo "Deleting default subnets..."
        # This value should be kept in sync with number of regions.
        local parallelism=9
        gcloud compute networks subnets list --network="${NETWORK}" --project "${NETWORK_PROJECT}" --format='value(region.basename())' | \
          xargs -i -P ${parallelism} gcloud --quiet compute networks subnets delete "${NETWORK}" --project "${NETWORK_PROJECT}" --region="{}" || true
      elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then
        echo "Deleting custom subnet..."
        gcloud --quiet compute networks subnets delete "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region="${REGION}" || true
      fi
    fi
    return
  fi

  # Only delete automatically created subnets.
  if [[ ${IP_ALIAS_SUBNETWORK} == ${INSTANCE_PREFIX}-subnet-default ]]; then
    echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
    if [[ -n $(gcloud beta compute networks subnets describe \
          --project "${NETWORK_PROJECT}" \
          --region ${REGION} \
          ${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
      gcloud beta --quiet compute networks subnets delete \
        --project "${NETWORK_PROJECT}" \
        --region ${REGION} \
        ${IP_ALIAS_SUBNETWORK}
    fi
  fi
}

function delete-subnetworks() {
  # If running in custom mode network we need to delete subnets manually.
  mode="$(check-network-mode)"
  if [[ "${mode}" == "CUSTOM" ]]; then
    if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then
      echo "Deleting default subnets..."
      # This value should be kept in sync with number of regions.
      local parallelism=9
      gcloud compute networks subnets list --network="${NETWORK}" --project "${NETWORK_PROJECT}" --format='value(region.basename())' | \
        xargs -i -P ${parallelism} gcloud --quiet compute networks subnets delete "${NETWORK}" --project "${NETWORK_PROJECT}" --region="{}" || true
    elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then
      echo "Deleting custom subnet..."
      gcloud --quiet compute networks subnets delete "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region="${REGION}" || true
    fi
    return
  fi

  # If we reached here, it means we're not using custom network.
  # So the only thing we need to check is if IP-aliases was turned
  # on and we created a subnet for it. If so, we should delete it.
  if [[ ${ENABLE_IP_ALIASES:-} == "true" ]]; then
    # Only delete the subnet if we created it (i.e it's not pre-existing).
    if [[ -z "${KUBE_GCE_IP_ALIAS_SUBNETWORK:-}" ]]; then
      echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
      if [[ -n $(gcloud beta compute networks subnets describe \
            --project "${NETWORK_PROJECT}" \
            --region ${REGION} \
            ${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
        gcloud beta --quiet compute networks subnets delete \
          --project "${NETWORK_PROJECT}" \
          --region ${REGION} \
          ${IP_ALIAS_SUBNETWORK}
      fi
    fi
  fi
}
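The rewrite shown above (old function first, then its replacement, following this page's old-then-new convention) stops gating all cleanup on ENABLE_IP_ALIASES: custom-mode subnets are now always removed first, and the IP-alias subnet is considered separately. The new deletion condition reduces to the following predicate (a condensed sketch using the same variables, not a drop-in replacement):

  # Delete the IP-alias subnet only if IP aliases are on AND the subnet was
  # auto-created by this script (i.e. the user did not supply their own).
  if [[ "${ENABLE_IP_ALIASES:-}" == "true" && -z "${KUBE_GCE_IP_ALIAS_SUBNETWORK:-}" ]]; then
    gcloud beta --quiet compute networks subnets delete "${IP_ALIAS_SUBNETWORK}" \
      --project "${NETWORK_PROJECT}" --region "${REGION}"
  fi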