Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions
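For context, a hedged sketch of the kind of invocation a vendor refresh like this typically involves (the exact flags are not shown in this commit; dep reads Gopkg.toml and Gopkg.lock from the repository root):

dep ensure -v                 # sync vendor/ with Gopkg.toml and Gopkg.lock
git add -A vendor Gopkg.lock  # stage the refreshed vendor tree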

View File

@ -14,8 +14,8 @@ go_test(
],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS generated vendored (new file, 4 lines)
View File

@ -0,0 +1,4 @@
approvers:
- dchen1107
- filbranden
- yguo0905

View File

@ -17,11 +17,13 @@ limitations under the License.
package gci
import (
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
@ -51,89 +53,97 @@ readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
readonly KUBE_API_SERVER_DOCKER_TAG=v1.11.0-alpha.0.1808_3c7452dc11645d-dirty
readonly LOG_OWNER_USER=$(whoami)
readonly LOG_OWNER_USER=$(id -un)
readonly LOG_OWNER_GROUP=$(id -gn)
readonly SERVICEACCOUNT_ISSUER=https://foo.bar.baz
readonly SERVICEACCOUNT_KEY_PATH=/foo/bar/baz.key
{{if .EncryptionProviderConfig}}
ENCRYPTION_PROVIDER_CONFIG={{.EncryptionProviderConfig}}
{{end}}
ENCRYPTION_PROVIDER_CONFIG_PATH={{.EncryptionProviderConfigPath}}
readonly ETCD_KMS_KEY_ID={{.ETCDKMSKeyID}}
{{if .CloudKMSIntegration}}
readonly CLOUD_KMS_INTEGRATION=true
{{end}}
`
kubeAPIServerManifestFileName = "kube-apiserver.manifest"
kmsPluginManifestFileName = "kms-plugin-container.manifest"
kubeAPIServerStartFuncName = "start-kube-apiserver"
// Position of containers within a pod manifest
kmsPluginContainerIndex = 0
apiServerContainerIndexNoKMS = 0
apiServerContainerIndexWithKMS = 1
// command": [
// "/bin/sh", - Index 0
// "-c", - Index 1
// "exec /usr/local/bin/kube-apiserver " - Index 2
execArgsIndex = 2
socketVolumeMountIndexKMSPlugin = 1
socketVolumeMountIndexAPIServer = 0
)
type kubeAPIServerEnv struct {
KubeHome string
EncryptionProviderConfig string
EncryptionProviderConfigPath string
ETCDKMSKeyID string
EncryptionProviderConfig string
CloudKMSIntegration bool
}
type kubeAPIServerManifestTestCase struct {
*ManifestTestCase
apiServerContainer v1.Container
kmsPluginContainer v1.Container
}
func newKubeAPIServerManifestTestCase(t *testing.T) *kubeAPIServerManifestTestCase {
return &kubeAPIServerManifestTestCase{
ManifestTestCase: newManifestTestCase(t, kubeAPIServerManifestFileName, kubeAPIServerStartFuncName, []string{kmsPluginManifestFileName}),
ManifestTestCase: newManifestTestCase(t, kubeAPIServerManifestFileName, kubeAPIServerStartFuncName, nil),
}
}
func (c *kubeAPIServerManifestTestCase) mustLoadContainers() {
func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv, kubeEnv string) {
c.mustInvokeFunc(kubeEnv, e)
c.mustLoadPodFromManifest()
switch len(c.pod.Spec.Containers) {
case 1:
c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexNoKMS]
case 2:
c.apiServerContainer = c.pod.Spec.Containers[apiServerContainerIndexWithKMS]
c.kmsPluginContainer = c.pod.Spec.Containers[kmsPluginContainerIndex]
default:
c.t.Fatalf("got %d containers in apiserver pod, want 1 or 2", len(c.pod.Spec.Containers))
}
}
func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv) {
c.mustInvokeFunc(deployHelperEnv, e)
c.mustLoadContainers()
}
func getEncryptionProviderConfigFlag(path string) string {
return fmt.Sprintf("--experimental-encryption-provider-config=%s", path)
}
func TestEncryptionProviderFlag(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
var (
// command": [
// "/bin/sh", - Index 0
// "-c", - Index 1
// "exec /usr/local/bin/kube-apiserver " - Index 2
execArgsIndex = 2
encryptionConfigFlag = "--encryption-provider-config"
)
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("FOO")),
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
testCases := []struct {
desc string
encryptionProviderConfig string
wantFlag bool
}{
{
desc: "ENCRYPTION_PROVIDER_CONFIG is set",
encryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("foo")),
wantFlag: true,
},
{
desc: "ENCRYPTION_PROVIDER_CONFIG is not set",
encryptionProviderConfig: "",
wantFlag: false,
},
}
c.invokeTest(e)
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
execArgs := c.apiServerContainer.Command[execArgsIndex]
if !strings.Contains(execArgs, expectedFlag) {
c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
EncryptionProviderConfig: tc.encryptionProviderConfig,
}
c.invokeTest(e, deployHelperEnv)
execArgs := c.pod.Spec.Containers[0].Command[execArgsIndex]
flagIsInArg := strings.Contains(execArgs, encryptionConfigFlag)
flag := fmt.Sprintf("%s=%s", encryptionConfigFlag, e.EncryptionProviderConfigPath)
switch {
case tc.wantFlag && !flagIsInArg:
t.Fatalf("Got %q,\n want flags to contain %q", execArgs, flag)
case !tc.wantFlag && flagIsInArg:
t.Fatalf("Got %q,\n do not want flags to contain %q", execArgs, encryptionConfigFlag)
case tc.wantFlag && flagIsInArg && !strings.Contains(execArgs, flag):
t.Fatalf("Got flags: %q, want it to contain %q", execArgs, flag)
}
})
}
}
@ -144,8 +154,8 @@ func TestEncryptionProviderConfig(t *testing.T) {
p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("FOO")),
EncryptionProviderConfigPath: p,
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("foo")),
}
c.mustInvokeFunc(deployHelperEnv, e)
@ -153,60 +163,91 @@ func TestEncryptionProviderConfig(t *testing.T) {
if _, err := os.Stat(p); err != nil {
c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
}
}
// TestKMSEncryptionProviderConfig asserts that if ETCD_KMS_KEY_ID is set then start-kube-apiserver will produce
// an EncryptionProviderConfig file of type KMS and inject the experimental-encryption-provider-config startup flag.
func TestKMSEncryptionProviderConfig(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
e := kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
ETCDKMSKeyID: "FOO",
}
c.invokeTest(e)
expectedFlag := getEncryptionProviderConfigFlag(e.EncryptionProviderConfigPath)
execArgs := c.apiServerContainer.Command[execArgsIndex]
if !strings.Contains(execArgs, expectedFlag) {
c.t.Fatalf("Got %q, wanted the flag to contain %q", execArgs, expectedFlag)
}
p := filepath.Join(c.kubeHome, "encryption-provider-config.yaml")
if _, err := os.Stat(p); err != nil {
c.t.Fatalf("Expected encryption provider config to be written to %s, but stat failed with error: %v", p, err)
}
d, err := ioutil.ReadFile(p)
got, err := ioutil.ReadFile(p)
if err != nil {
c.t.Fatalf("Failed to read encryption provider config %s", p)
}
if !strings.Contains(string(d), "name: grpc-kms-provider") {
c.t.Fatalf("Got %s\n, wanted encryption provider config to be of type grpc-kms", string(d))
want := []byte("foo")
if !bytes.Equal(got, want) {
c.t.Fatalf("got encryptionConfig:\n%q\n, want encryptionConfig:\n%q", got, want)
}
}
func TestKMSPluginAndAPIServerSharedVolume(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
var e = kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
ETCDKMSKeyID: "FOO",
func TestKMSIntegration(t *testing.T) {
var (
socketPath = "/var/run/kmsplugin"
dirOrCreate = v1.HostPathType(v1.HostPathDirectoryOrCreate)
socketName = "kmssocket"
)
testCases := []struct {
desc string
cloudKMSIntegration bool
wantVolume v1.Volume
wantVolMount v1.VolumeMount
}{
{
desc: "CLOUD_KMS_INTEGRATION is set",
cloudKMSIntegration: true,
wantVolume: v1.Volume{
Name: socketName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: socketPath,
Type: &dirOrCreate,
},
},
},
wantVolMount: v1.VolumeMount{
Name: socketName,
MountPath: socketPath,
},
},
{
desc: "CLOUD_KMS_INTEGRATION is not set",
cloudKMSIntegration: false,
},
}
c.invokeTest(e)
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
c := newKubeAPIServerManifestTestCase(t)
defer c.tearDown()
k := c.kmsPluginContainer.VolumeMounts[socketVolumeMountIndexKMSPlugin].MountPath
a := c.apiServerContainer.VolumeMounts[socketVolumeMountIndexAPIServer].MountPath
var e = kubeAPIServerEnv{
KubeHome: c.kubeHome,
EncryptionProviderConfigPath: filepath.Join(c.kubeHome, "encryption-provider-config.yaml"),
EncryptionProviderConfig: base64.StdEncoding.EncodeToString([]byte("foo")),
CloudKMSIntegration: tc.cloudKMSIntegration,
}
if k != a {
t.Fatalf("Got %s!=%s, wanted KMSPlugin VolumeMount #1:%s to be equal to kube-apiserver VolumeMount #0:%s",
k, a, k, a)
c.invokeTest(e, deployHelperEnv)
// By this point, we can be sure that the kube-apiserver manifest is a valid Pod.
var gotVolume v1.Volume
for _, v := range c.pod.Spec.Volumes {
if v.Name == socketName {
gotVolume = v
break
}
}
if !reflect.DeepEqual(gotVolume, tc.wantVolume) {
t.Errorf("got volume %v, want %v", gotVolume, tc.wantVolume)
}
var gotVolumeMount v1.VolumeMount
for _, v := range c.pod.Spec.Containers[0].VolumeMounts {
if v.Name == socketName {
gotVolumeMount = v
break
}
}
if !reflect.DeepEqual(gotVolumeMount, tc.wantVolMount) {
t.Errorf("got volumeMount %v, want %v", gotVolumeMount, tc.wantVolMount)
}
})
}
}

View File

@ -25,17 +25,6 @@ set -o errexit
set -o nounset
set -o pipefail
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function setup-os-params {
# Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
# /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
@ -43,6 +32,40 @@ function setup-os-params {
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# #1: number of secure bytes to generate. We round up to the nearest multiple of 32.
function secure_random {
local infobytes="${1}"
if ((infobytes <= 0)); then
echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
return 1
fi
local out=""
for (( i = 0; i < "${infobytes}"; i += 32 )); do
# uuids have 122 random bits, sha256 sums have 256 bits, so we concatenate
# three uuids and take their SHA-256 sum. The sum is encoded in ASCII hex,
# hence the 64-character cut.
out+="$(
(
uuidgen --random;
uuidgen --random;
uuidgen --random;
) | sha256sum \
| head -c 64
)";
done
# Finally, convert the ASCII hex to base64 to increase the density.
echo -n "${out}" | xxd -r -p | base64 -w 0
}
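A brief usage sketch of secure_random (relying on the same uuidgen/sha256sum/xxd/base64 tools the function itself uses): asking for 32 bytes runs the loop once, producing 64 hex characters, i.e. 32 random bytes, which base64-encode to a 44-character string.

token="$(secure_random 32)"
echo "${#token}"   # 44: base64 of 32 bytes, including one trailing '=' of padding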
function config-ip-firewall {
echo "Configuring IP firewall rules"
@ -51,18 +74,20 @@ function config-ip-firewall {
sysctl -w net.ipv4.conf.all.route_localnet=1
# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
# We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
iptables -A INPUT -w -p SCTP -j ACCEPT
fi
if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
iptables -A FORWARD -w -p SCTP -j ACCEPT
fi
# Flush iptables nat table
@ -568,6 +593,12 @@ EOF
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
@ -615,6 +646,15 @@ EOF
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
# Multimaster indicates that the cluster is HA.
# Currently the only HA clusters are regional.
# If we introduce zonal multimaster this will need to be revisited.
if [[ -n "${MULTIMASTER:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
@ -740,7 +780,7 @@ function create-master-audit-policy {
- group: "storage.k8s.io"'
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1beta1
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
@ -788,6 +828,13 @@ rules:
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
- level: None
users: ["cluster-autoscaler"]
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["configmaps", "endpoints"]
# Don't log HPA fetching metrics.
- level: None
users:
@ -1152,6 +1199,7 @@ function start-kubelet {
local -r kubelet_env_file="/etc/default/kubelet"
local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
@ -1217,10 +1265,12 @@ EOF
# Create the log file and set its properties.
#
# $1 is the file to create.
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file.
function prepare-log-file {
touch $1
chmod 644 $1
chown "${LOG_OWNER_USER:-root}":"${LOG_OWNER_GROUP:-root}" $1
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" $1
}
# Prepares parameters for kube-proxy manifest.
@ -1327,7 +1377,6 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
# Get default storage backend from manifest file.
@ -1460,8 +1509,12 @@ function start-kube-apiserver {
params+=" --allow-privileged=true"
params+=" --cloud-provider=gce"
params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}"
if [[ -z "${ETCD_SERVERS:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#http://127.0.0.1:4002}"
elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
fi
params+=" --secure-port=443"
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
@ -1517,39 +1570,15 @@ function start-kube-apiserver {
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
if [[ -n "${SERVICEACCOUNT_ISSUER:-}" ]]; then
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
params+=" --service-account-api-audiences=${SERVICEACCOUNT_API_AUDIENCES}"
fi
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-api-audiences=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
local audit_policy_config_mount=""
local audit_policy_config_volume=""
local audit_webhook_config_mount=""
local audit_webhook_config_volume=""
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
# Disable AdvancedAuditing enabled by default
if [[ -z "${FEATURE_GATES:-}" ]]; then
FEATURE_GATES="AdvancedAuditing=false"
else
FEATURE_GATES="${FEATURE_GATES},AdvancedAuditing=false"
fi
elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
local -r audit_policy_file="/etc/audit_policy.config"
params+=" --audit-policy-file=${audit_policy_file}"
# Create the audit policy file, and mount it into the apiserver pod.
@ -1597,8 +1626,6 @@ function start-kube-apiserver {
fi
fi
if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
params+=" --audit-webhook-mode=batch"
# Create the audit webhook config file, and mount it into the apiserver pod.
local -r audit_webhook_config_file="/etc/audit_webhook.config"
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
@ -1609,6 +1636,8 @@ function start-kube-apiserver {
# Batching parameters
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MODE:-}" ]]; then
params+=" --audit-webhook-mode=${ADVANCED_AUDIT_WEBHOOK_MODE}"
else
params+=" --audit-webhook-mode=batch"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"
@ -1735,46 +1764,27 @@ function start-kube-apiserver {
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
container_env+="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}"
fi
if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
if [[ -n "${container_env}" ]]; then
container_env="${container_env}, "
fi
container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\""
container_env+="{\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"}"
fi
if [[ -n "${container_env}" ]]; then
container_env="\"env\":[{${container_env}}],"
container_env="\"env\":[${container_env}],"
fi
if [[ -n "${ETCD_KMS_KEY_ID:-}" ]]; then
ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n'
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- kms:
name: grpc-kms-provider
cachesize: 1000
endpoint: unix:///var/run/kmsplugin/socket.sock
EOM
)
fi
local -r src_file="${src_dir}/kube-apiserver.manifest"
if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
ENCRYPTION_PROVIDER_CONFIG_PATH="${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml}"
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${ENCRYPTION_PROVIDER_CONFIG_PATH}"
params+=" --experimental-encryption-provider-config=${ENCRYPTION_PROVIDER_CONFIG_PATH}"
fi
# params is passed by reference, so no "$"
setup-etcd-encryption "${src_file}" params
src_file="${src_dir}/kube-apiserver.manifest"
# Evaluate variables.
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@ -1799,67 +1809,116 @@ EOM
sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
if [[ -z "${ETCD_KMS_KEY_ID:-}" ]]; then
# Removing KMS related placeholders.
sed -i -e " {
s@{{kms_plugin_container}}@@
cp "${src_file}" "${ETC_MANIFESTS:-/etc/kubernetes/manifests}"
}
s@{{kms_socket_mount}}@@
# Sets up etcd encryption.
# Configuration of etcd-level encryption consists of the following steps:
# 1. Writing the encryption provider config to disk
# 2. Adding the encryption-provider-config flag to kube-apiserver
# 3. Adding kms-socket-vol and kms-socket-vol-mnt to enable communication with kms-plugin (if requested)
#
# Expects parameters:
# $1 - path to kube-apiserver template
# $2 - kube-apiserver startup flags (must be passed by reference)
#
# Assumes vars (supplied via kube-env):
# ENCRYPTION_PROVIDER_CONFIG
# CLOUD_KMS_INTEGRATION
# ENCRYPTION_PROVIDER_CONFIG_PATH (will default to /etc/srv/kubernetes/encryption-provider-config.yml)
function setup-etcd-encryption {
local kube_apiserver_template_path
local -n kube_api_server_params
local default_encryption_provider_config_vol
local default_encryption_provider_config_vol_mnt
local encryption_provider_config_vol_mnt
local encryption_provider_config_vol
local default_kms_socket_dir
local default_kms_socket_vol_mnt
local default_kms_socket_vol
local kms_socket_vol_mnt
local kms_socket_vol
local encryption_provider_config_path
kube_apiserver_template_path="$1"
if [[ -z "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
sed -i -e " {
s@{{encryption_provider_mount}}@@
s@{{kms_socket_volume}}@@
s@{{encryption_provider_volume}}@@
} " "${src_file}"
else
local kms_plugin_src_file="${src_dir}/kms-plugin-container.manifest"
if [[ ! -f "${kms_plugin_src_file}" ]]; then
echo "Error: KMS Integration was requested, but "${kms_plugin_src_file}" is missing."
exit 1
fi
if [[ ! -f "${ENCRYPTION_PROVIDER_CONFIG_PATH}" ]]; then
echo "Error: KMS Integration was requested, but "${ENCRYPTION_PROVIDER_CONFIG_PATH}" is missing."
exit 1
fi
# TODO: Validate that the encryption config is for KMS.
local kms_socket_dir="/var/run/kmsplugin"
# kms_socket_mnt is used by both kms_plugin and kube-apiserver - this is how these containers talk.
local kms_socket_mnt="{ \"name\": \"kmssocket\", \"mountPath\": \"${kms_socket_dir}\", \"readOnly\": false}"
local kms_socket_vol="{ \"name\": \"kmssocket\", \"hostPath\": {\"path\": \"${kms_socket_dir}\", \"type\": \"DirectoryOrCreate\"}}"
local kms_path_to_socket="${kms_socket_dir}/socket.sock"
local encryption_provider_mnt="{ \"name\": \"encryptionconfig\", \"mountPath\": \"${ENCRYPTION_PROVIDER_CONFIG_PATH}\", \"readOnly\": true}"
local encryption_provider_vol="{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${ENCRYPTION_PROVIDER_CONFIG_PATH}\", \"type\": \"File\"}}"
# TODO these are used in other places, convert to global.
local gce_conf_path="/etc/gce.conf"
local cloud_config_mount="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}"
local kms_plugin_container=$(echo $(sed " {
s@{{kms_key_uri}}@${ETCD_KMS_KEY_ID}@
s@{{gce_conf_path}}@${gce_conf_path}@
s@{{kms_path_to_socket}}@${kms_path_to_socket}@
s@{{kms_socket_mount}}@${kms_socket_mnt}@
s@{{cloud_config_mount}}@${cloud_config_mount}@
} " "${kms_plugin_src_file}") | tr "\n" "\\n")
sed -i -e " {
s@{{kms_plugin_container}}@${kms_plugin_container},@
s@{{kms_socket_mount}}@${kms_socket_mnt},@
s@{{encryption_provider_mount}}@${encryption_provider_mnt},@
s@{{kms_socket_volume}}@${kms_socket_vol},@
s@{{encryption_provider_volume}}@${encryption_provider_vol},@
} " "${src_file}"
s@{{kms_socket_mount}}@@
s@{{kms_socket_volume}}@@
} " "${kube_apiserver_template_path}"
return
fi
cp "${src_file}" "${ETC_MANIFESTS:-/etc/kubernetes/manifests}"
kube_api_server_params="$2"
encryption_provider_config_path=${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml}
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}"
kube_api_server_params+=" --encryption-provider-config=${encryption_provider_config_path}"
default_encryption_provider_config_vol=$(echo "{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${encryption_provider_config_path}\", \"type\": \"File\"}}" | base64 | tr -d '\r\n')
default_encryption_provider_config_vol_mnt=$(echo "{ \"name\": \"encryptionconfig\", \"mountPath\": \"${encryption_provider_config_path}\", \"readOnly\": true}" | base64 | tr -d '\r\n')
encryption_provider_config_vol_mnt=$(echo "${ENCRYPTION_PROVIDER_CONFIG_VOL_MNT:-"${default_encryption_provider_config_vol_mnt}"}" | base64 --decode)
encryption_provider_config_vol=$(echo "${ENCRYPTION_PROVIDER_CONFIG_VOL:-"${default_encryption_provider_config_vol}"}" | base64 --decode)
sed -i -e " {
s@{{encryption_provider_mount}}@${encryption_provider_config_vol_mnt},@
s@{{encryption_provider_volume}}@${encryption_provider_config_vol},@
} " "${kube_apiserver_template_path}"
if [[ -n "${CLOUD_KMS_INTEGRATION:-}" ]]; then
default_kms_socket_dir="/var/run/kmsplugin"
default_kms_socket_vol_mnt=$(echo "{ \"name\": \"kmssocket\", \"mountPath\": \"${default_kms_socket_dir}\", \"readOnly\": false}" | base64 | tr -d '\r\n')
default_kms_socket_vol=$(echo "{ \"name\": \"kmssocket\", \"hostPath\": {\"path\": \"${default_kms_socket_dir}\", \"type\": \"DirectoryOrCreate\"}}" | base64 | tr -d '\r\n')
kms_socket_vol_mnt=$(echo "${KMS_PLUGIN_SOCKET_VOL_MNT:-"${default_kms_socket_vol_mnt}"}" | base64 --decode)
kms_socket_vol=$(echo "${KMS_PLUGIN_SOCKET_VOL:-"${default_kms_socket_vol}"}" | base64 --decode)
sed -i -e " {
s@{{kms_socket_mount}}@${kms_socket_vol_mnt},@
s@{{kms_socket_volume}}@${kms_socket_vol},@
} " "${kube_apiserver_template_path}"
else
sed -i -e " {
s@{{kms_socket_mount}}@@
s@{{kms_socket_volume}}@@
} " "${kube_apiserver_template_path}"
fi
}
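For illustration, a hedged sketch of how kube-env could now feed this function, reusing the same KMS provider config that the removed ETCD_KMS_KEY_ID branch above used to generate inline (the values and the CLOUD_KMS_INTEGRATION setting are illustrative, not part of this diff):

ENCRYPTION_PROVIDER_CONFIG="$(base64 -w 0 <<'EOF'
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
  - secrets
  providers:
  - kms:
      name: grpc-kms-provider
      cachesize: 1000
      endpoint: unix:///var/run/kmsplugin/socket.sock
EOF
)"
CLOUD_KMS_INTEGRATION=true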
# Applies encryption provider config.
# This function may be triggered in two scenarios:
# 1. Decryption of etcd
# 2. Encryption of etcd is added after the cluster is deployed
# Both cases require that the existing secrets in etcd be re-processed.
#
# Assumes vars (supplied via kube-env):
# ENCRYPTION_PROVIDER_CONFIG_FORCE
function apply-encryption-config() {
if [[ "${ENCRYPTION_PROVIDER_CONFIG_FORCE:-false}" == "false" ]]; then
return
fi
# need kube-apiserver to be ready
until kubectl get secret; do
sleep ${ENCRYPTION_PROVIDER_CONFIG_FORCE_DELAY:-5}
done
retries=${ENCRYPTION_PROVIDER_CONFIG_FORCE_RETRIES:-5}
# The command below may fail when a conflict is detected during an update on a secret (something
# else updated the secret in the middle of our update).
# TODO: Retry only on errors caused by a conflict.
until (( retries == 0 )); do
# Forces all secrets to be rewritten to etcd, encrypting or decrypting them in the process.
# https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/
if kubectl get secrets --all-namespaces -o json | kubectl replace -f -; then
break
fi
(( retries-- ))
sleep "${ENCRYPTION_PROVIDER_CONFIG_FORCE_RETRY_SLEEP:-3}"
done
}
# Starts kubernetes controller manager.
@ -1944,7 +2003,6 @@ function start-kube-controller-manager {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -1957,6 +2015,7 @@ function start-kube-controller-manager {
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@ -1991,10 +2050,10 @@ function start-kube-scheduler {
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@ -2043,6 +2102,12 @@ function setup-addon-manifests {
copy-manifests "${psp_dir}" "${dst_dir}"
fi
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
local -r nth_dir="${src_dir}/${3:-$2}/node-termination-handler"
if [[ -d "${nth_dir}" ]]; then
copy-manifests "${nth_dir}" "${dst_dir}"
fi
fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
@ -2167,14 +2232,14 @@ function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# Update {{ container-runtime }} with actual container runtime name,
# and {{ container-runtime-endpoint }} with actual container runtime
# Update {{ fluentd_container_runtime_service }} with actual container runtime name,
# and {{ container_runtime_endpoint }} with actual container runtime
# endpoint.
function update-container-runtime {
local -r file="$1"
local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
sed -i \
-e "s@{{ *container_runtime *}}@${CONTAINER_RUNTIME_NAME:-docker}@g" \
-e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}@g" \
-e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
"${file}"
}
@ -2200,6 +2265,17 @@ function update-prometheus-to-sd-parameters {
fi
}
# Updates parameters in the yaml file for the prometheus-to-sd configuration in daemon sets,
# or removes the component if it is disabled.
function update-daemon-set-prometheus-to-sd-parameters {
if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
update-prometheus-to-sd-parameters $1
fi
}
# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
@ -2232,6 +2308,7 @@ function setup-coredns-manifest {
function setup-fluentd {
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
@ -2244,9 +2321,12 @@ function setup-fluentd {
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
@ -2259,7 +2339,7 @@ function setup-kube-dns-manifest {
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$(echo "$CUSTOM_KUBE_DNS_YAML")
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
fi
@ -2274,6 +2354,16 @@ EOF
fi
}
# Sets up the manifests of local dns cache agent for k8s addons.
function setup-nodelocaldns-manifest {
setup-addon-manifests "addons" "dns/nodelocaldns"
local -r localdns_file="${dst_dir}/dns/nodelocaldns/nodelocaldns.yaml"
# Replace the template variables with their values.
sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
}
# Sets up the manifests of netd for k8s addons.
function setup-netd-manifest {
local -r netd_file="${dst_dir}/netd/netd.yaml"
@ -2282,7 +2372,7 @@ function setup-netd-manifest {
if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
# Replace with custom GCP netd deployment.
cat > "${netd_file}" <<EOF
$(echo "$CUSTOM_NETD_YAML")
$CUSTOM_NETD_YAML
EOF
fi
}
@ -2330,9 +2420,9 @@ function start-kube-addons {
if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
# Replace with custom GKE kube proxy.
cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$(echo "$CUSTOM_KUBE_PROXY_YAML")
$CUSTOM_KUBE_PROXY_YAML
EOF
update-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
fi
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
@ -2355,10 +2445,17 @@ EOF
base_eventer_memory="190Mi"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
nanny_memory="90Mi"
local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local heapster_min_cluster_size="16"
local metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
local -r eventer_memory_per_node="500"
local -r nanny_memory_per_node="200"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-100Mi}"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-10m}"
metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
heapster_min_cluster_size="5"
fi
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
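As a worked example of the nanny_memory formula just above, using this script's defaults (nanny_memory_per_node=200): a 100-node cluster gives num_kube_nodes=101, so

echo "$((101 * 200 + 90 * 1024))Ki"   # 112360Ki, i.e. roughly 110Mi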
@ -2379,6 +2476,7 @@ EOF
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *heapster_min_cluster_size *}}@${heapster_min_cluster_size}@g" "${controller_yaml}"
update-prometheus-to-sd-parameters ${controller_yaml}
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]]; then
@ -2406,10 +2504,29 @@ EOF
fi
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
setup-addon-manifests "addons" "metrics-server"
base_metrics_server_cpu="40m"
base_metrics_server_memory="40Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="16"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_server_cpu="40m"
base_metrics_server_memory="35Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="5"
fi
local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
fi
if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
setup-node-termination-handler-manifest
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns/coredns"
@ -2418,6 +2535,9 @@ EOF
setup-addon-manifests "addons" "dns/kube-dns"
setup-kube-dns-manifest
fi
if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
setup-nodelocaldns-manifest
fi
fi
if [[ "${ENABLE_NETD:-}" == "true" ]]; then
setup-netd-manifest
@ -2465,13 +2585,16 @@ EOF
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
if [[ "${FEATURE_GATES:-}" =~ "AllAlpha=true" || "${FEATURE_GATES:-}" =~ "CSIDriverRegistry=true" || "${FEATURE_GATES:-}" =~ "CSINodeInfo=true" ]]; then
setup-addon-manifests "addons" "storage-crds"
fi
if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "ip-masq-agent"
fi
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-prometheus-to-sd-parameters ${metadata_proxy_yaml}
update-daemon-set-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
@ -2480,21 +2603,26 @@ EOF
setup-addon-manifests "addons" "istio/noauth"
fi
fi
if [[ "${FEATURE_GATES:-}" =~ "RuntimeClass=true" ]]; then
setup-addon-manifests "addons" "runtimeclass"
fi
if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
download-extra-addons
setup-addon-manifests "addons" "gce-extras"
fi
# Place addon manager pod manifest.
cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
src_file="${src_dir}/kube-addon-manager.yaml"
sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
local -r e2e_image_puller_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
update-container-runtime "${e2e_image_puller_manifest}"
cp "${e2e_image_puller_manifest}" /etc/kubernetes/manifests/
function setup-node-termination-handler-manifest {
local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml"
if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then
sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
fi
}
# Setups manifests for ingress controller and gce-specific policies for service controller.
@ -2523,16 +2651,6 @@ function start-lb-controller {
fi
}
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
@ -2638,6 +2756,21 @@ EOF
function main() {
echo "Start to configure instance for kubernetes"
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Resource requests of master components.
KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
@ -2667,9 +2800,9 @@ function main() {
fi
# generate the controller manager, scheduler and cluster autoscaler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
setup-os-params
config-ip-firewall
@ -2714,14 +2847,11 @@ function main() {
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
apply-encryption-config &
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi
@ -2732,9 +2862,6 @@ function main() {
echo "Done for the configuration for kubernetes"
}
# use --source-only to test functions defined in this script.
if [[ "$#" -eq 1 && "${1}" == "--source-only" ]]; then
:
else
main "${@}"
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "${@}"
fi
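A short hedged illustration of what the new BASH_SOURCE guard buys (the script path is assumed, since file names are not shown in this view): the script can now be sourced to load its functions without main running, which is what ManifestTestCase.mustInvokeFunc relies on below when it drops the old --source-only argument.

source cluster/gce/gci/configure-helper.sh   # defines functions only; main does not run
start-kube-apiserver                         # call a single function under test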

View File

@ -28,8 +28,8 @@ DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.5.0"
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
DEFAULT_CRICTL_VERSION="v1.11.0"
DEFAULT_CRICTL_SHA1="8f5142b985d314cdebb51afd55054d5ec00c442a"
DEFAULT_CRICTL_VERSION="v1.12.0"
DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###
@ -247,6 +247,11 @@ function install-crictl {
fi
local -r crictl="crictl-${crictl_version}-linux-amd64"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
if is-preloaded "${crictl}" "${crictl_sha1}"; then
echo "crictl is preloaded"
return
@ -257,11 +262,6 @@ function install-crictl {
download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
chmod a+x "${KUBE_BIN}/crictl"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
}
function install-exec-auth-plugin {
@ -275,6 +275,14 @@ function install-exec-auth-plugin {
download-or-bust "${plugin_sha1}" "${plugin_url}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
if [[ ! "${EXEC_AUTH_PLUGIN_LICENSE_URL:-}" ]]; then
return
fi
local -r license_url="${EXEC_AUTH_PLUGIN_LICENSE_URL}"
echo "Downloading gke-exec-auth-plugin license"
download-or-bust "" "${license_url}"
mv "${KUBE_HOME}/LICENSE" "${KUBE_BIN}/gke-exec-auth-plugin-license"
}
function install-kube-manifests {
@ -421,6 +429,7 @@ function install-kube-binary-config {
install-crictl
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
# TODO(awly): include the binary and license in the OS image.
install-exec-auth-plugin
fi

View File

@ -125,7 +125,7 @@ func (c *ManifestTestCase) mustCreateEnv(envTemplate string, env interface{}) {
func (c *ManifestTestCase) mustInvokeFunc(envTemplate string, env interface{}) {
c.mustCreateEnv(envTemplate, env)
args := fmt.Sprintf("source %s ; source %s --source-only ; %s", c.envScriptPath, configureHelperScriptName, c.manifestFuncName)
args := fmt.Sprintf("source %s ; source %s; %s", c.envScriptPath, configureHelperScriptName, c.manifestFuncName)
cmd := exec.Command("bash", "-c", args)
bs, err := cmd.CombinedOutput()
@ -143,7 +143,7 @@ func (c *ManifestTestCase) mustLoadPodFromManifest() {
}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &c.pod); err != nil {
c.t.Fatalf("Failed to decode manifest: %v", err)
c.t.Fatalf("Failed to decode manifest:\n%s\nerror: %v", json, err)
}
}

View File

@ -83,9 +83,6 @@ function create-master-instance-internal() {
retries=30
sleep_sec=60
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"