vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions


@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"dns_test.go",
"versions_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"dns.go",
"manifests.go",
"versions.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -0,0 +1,247 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
"fmt"
"runtime"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kuberuntime "k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/util/version"
)
const (
// KubeDNSServiceAccountName describes the name of the ServiceAccount for the kube-dns addon
KubeDNSServiceAccountName = "kube-dns"
)
// EnsureDNSAddon creates the kube-dns or CoreDNS addon
func EnsureDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
if err != nil {
return fmt.Errorf("couldn't parse kubernetes version %q: %v", cfg.KubernetesVersion, err)
}
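// The CoreDNS feature gate decides which DNS addon to deploy; kube-dns is the default.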
if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
return coreDNSAddon(cfg, client, k8sVersion)
}
return kubeDNSAddon(cfg, client, k8sVersion)
}
func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, k8sVersion *version.Version) error {
if err := CreateServiceAccount(client); err != nil {
return err
}
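// The cluster DNS IP is derived from the service subnet (e.g. 10.96.0.10 for 10.96.0.0/12).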
dnsip, err := kubeadmconstants.GetDNSIP(cfg.Networking.ServiceSubnet)
if err != nil {
return err
}
var dnsBindAddr, dnsProbeAddr string
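// For an IPv6 service subnet, kube-dns must bind to the IPv6 loopback and the probe address needs brackets.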
if dnsip.To4() == nil {
dnsBindAddr = "::1"
dnsProbeAddr = "[" + dnsBindAddr + "]"
} else {
dnsBindAddr = "127.0.0.1"
dnsProbeAddr = dnsBindAddr
}
// Get the YAML manifest conditionally based on the k8s version
kubeDNSDeploymentBytes := GetKubeDNSManifest(k8sVersion)
dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(kubeDNSDeploymentBytes,
struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, DNSProbeType, MasterTaintKey string }{
ImageRepository: cfg.ImageRepository,
Arch: runtime.GOARCH,
// Get the kube-dns version conditionally based on the k8s version
Version: GetDNSVersion(k8sVersion, kubeadmconstants.KubeDNS),
DNSBindAddr: dnsBindAddr,
DNSProbeAddr: dnsProbeAddr,
DNSDomain: cfg.Networking.DNSDomain,
DNSProbeType: GetKubeDNSProbeType(k8sVersion),
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
})
if err != nil {
return fmt.Errorf("error when parsing kube-dns deployment template: %v", err)
}
dnsServiceBytes, err := kubeadmutil.ParseTemplate(KubeDNSService, struct{ DNSIP string }{
DNSIP: dnsip.String(),
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
if err := createKubeDNSAddon(dnsDeploymentBytes, dnsServiceBytes, client); err != nil {
return err
}
fmt.Println("[addons] Applied essential addon: kube-dns")
return nil
}
// CreateServiceAccount creates the ServiceAccount for kube-dns, if it doesn't already exist.
func CreateServiceAccount(client clientset.Interface) error {
return apiclient.CreateOrUpdateServiceAccount(client, &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: KubeDNSServiceAccountName,
Namespace: metav1.NamespaceSystem,
},
})
}
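// createKubeDNSAddon decodes the rendered Deployment and Service manifests and applies them to the cluster.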
func createKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.Interface) error {
kubednsDeployment := &apps.Deployment{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
return fmt.Errorf("unable to decode kube-dns deployment %v", err)
}
// Create the Deployment for kube-dns or update it in case it already exists
if err := apiclient.CreateOrUpdateDeployment(client, kubednsDeployment); err != nil {
return err
}
kubednsService := &v1.Service{}
return createDNSService(kubednsService, serviceBytes, client)
}
func coreDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, k8sVersion *version.Version) error {
// Get the YAML manifest conditionally based on the k8s version
dnsDeploymentBytes := GetCoreDNSManifest(k8sVersion)
coreDNSDeploymentBytes, err := kubeadmutil.ParseTemplate(dnsDeploymentBytes, struct{ MasterTaintKey, Version string }{
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
Version: GetDNSVersion(k8sVersion, kubeadmconstants.CoreDNS),
})
if err != nil {
return fmt.Errorf("error when parsing CoreDNS deployment template: %v", err)
}
// Get the config file for CoreDNS
coreDNSConfigMapBytes, err := kubeadmutil.ParseTemplate(CoreDNSConfigMap, struct{ DNSDomain, ServiceCIDR string }{
ServiceCIDR: cfg.Networking.ServiceSubnet,
DNSDomain: cfg.Networking.DNSDomain,
})
if err != nil {
return fmt.Errorf("error when parsing CoreDNS configMap template: %v", err)
}
dnsip, err := kubeadmconstants.GetDNSIP(cfg.Networking.ServiceSubnet)
if err != nil {
return err
}
coreDNSServiceBytes, err := kubeadmutil.ParseTemplate(KubeDNSService, struct{ DNSIP string }{
DNSIP: dnsip.String(),
})
if err != nil {
return fmt.Errorf("error when parsing CoreDNS service template: %v", err)
}
if err := createCoreDNSAddon(coreDNSDeploymentBytes, coreDNSServiceBytes, coreDNSConfigMapBytes, client); err != nil {
return err
}
fmt.Println("[addons] Applied essential addon: CoreDNS")
return nil
}
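// createCoreDNSAddon decodes and applies the CoreDNS ConfigMap, ClusterRole, ClusterRoleBinding, ServiceAccount, Deployment and Service.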
func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, client clientset.Interface) error {
coreDNSConfigMap := &v1.ConfigMap{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), configBytes, coreDNSConfigMap); err != nil {
return fmt.Errorf("unable to decode CoreDNS configmap %v", err)
}
// Create the ConfigMap for CoreDNS or update it in case it already exists
if err := apiclient.CreateOrUpdateConfigMap(client, coreDNSConfigMap); err != nil {
return err
}
coreDNSClusterRoles := &rbac.ClusterRole{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSClusterRole), coreDNSClusterRoles); err != nil {
return fmt.Errorf("unable to decode CoreDNS clusterroles %v", err)
}
// Create the ClusterRole for CoreDNS or update it in case it already exists
if err := apiclient.CreateOrUpdateClusterRole(client, coreDNSClusterRoles); err != nil {
return err
}
coreDNSClusterRolesBinding := &rbac.ClusterRoleBinding{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSClusterRoleBinding), coreDNSClusterRolesBinding); err != nil {
return fmt.Errorf("unable to decode CoreDNS clusterrolebindings %v", err)
}
// Create the ClusterRoleBinding for CoreDNS or update it in case it already exists
if err := apiclient.CreateOrUpdateClusterRoleBinding(client, coreDNSClusterRolesBinding); err != nil {
return err
}
coreDNSServiceAccount := &v1.ServiceAccount{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSServiceAccount), coreDNSServiceAccount); err != nil {
return fmt.Errorf("unable to decode CoreDNS configmap %v", err)
}
// Create the ConfigMap for CoreDNS or update it in case it already exists
if err := apiclient.CreateOrUpdateServiceAccount(client, coreDNSServiceAccount); err != nil {
return err
}
coreDNSDeployment := &apps.Deployment{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), deploymentBytes, coreDNSDeployment); err != nil {
return fmt.Errorf("unable to decode CoreDNS deployment %v", err)
}
// Create the Deployment for CoreDNS or update it in case it already exists
if err := apiclient.CreateOrUpdateDeployment(client, coreDNSDeployment); err != nil {
return err
}
coreDNSService := &v1.Service{}
return createDNSService(coreDNSService, serviceBytes, client)
}
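// createDNSService creates the DNS Service, falling back to an update if it already exists or its clusterIP is already allocated.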
func createDNSService(dnsService *v1.Service, serviceBytes []byte, client clientset.Interface) error {
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), serviceBytes, dnsService); err != nil {
return fmt.Errorf("unable to decode the DNS service %v", err)
}
// Can't use a generic apiclient helper func here as we have to tolerate more than AlreadyExists.
if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(dnsService); err != nil {
// Ignore if the Service is invalid with this error message:
// Service "kube-dns" is invalid: spec.clusterIP: Invalid value: "10.96.0.10": provided IP is already allocated
if !apierrors.IsAlreadyExists(err) && !apierrors.IsInvalid(err) {
return fmt.Errorf("unable to create a new DNS service: %v", err)
}
if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(dnsService); err != nil {
return fmt.Errorf("unable to create/update the DNS service: %v", err)
}
}
return nil
}


@ -0,0 +1,178 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
"testing"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
clientsetfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
api "k8s.io/kubernetes/pkg/apis/core"
)
func TestCreateServiceAccount(t *testing.T) {
tests := []struct {
name string
createErr error
expectErr bool
}{
{
"error-free case",
nil,
false,
},
{
"duplication errors should be ignored",
apierrors.NewAlreadyExists(api.Resource(""), ""),
false,
},
{
"unexpected errors should be returned",
apierrors.NewUnauthorized(""),
true,
},
}
for _, tc := range tests {
client := clientsetfake.NewSimpleClientset()
if tc.createErr != nil {
client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tc.createErr
})
}
err := CreateServiceAccount(client)
if tc.expectErr {
if err == nil {
t.Errorf("CreateServiceAccounts(%s) wanted err, got nil", tc.name)
}
continue
} else if !tc.expectErr && err != nil {
t.Errorf("CreateServiceAccounts(%s) returned unexpected err: %v", tc.name, err)
}
wantResourcesCreated := 1
if len(client.Actions()) != wantResourcesCreated {
t.Errorf("CreateServiceAccounts(%s) should have made %d actions, but made %d", tc.name, wantResourcesCreated, len(client.Actions()))
}
for _, action := range client.Actions() {
if action.GetVerb() != "create" || action.GetResource().Resource != "serviceaccounts" {
t.Errorf("CreateServiceAccounts(%s) called [%v %v], but wanted [create serviceaccounts]",
tc.name, action.GetVerb(), action.GetResource().Resource)
}
}
}
}
func TestCompileManifests(t *testing.T) {
var tests = []struct {
manifest string
data interface{}
expected bool
}{
{
manifest: v180AndAboveKubeDNSDeployment,
data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, DNSProbeType, MasterTaintKey string }{
ImageRepository: "foo",
Arch: "foo",
Version: "foo",
DNSBindAddr: "foo",
DNSProbeAddr: "foo",
DNSDomain: "foo",
DNSProbeType: "foo",
MasterTaintKey: "foo",
},
expected: true,
},
{
manifest: KubeDNSService,
data: struct{ DNSIP string }{
DNSIP: "foo",
},
expected: true,
},
{
manifest: CoreDNSDeployment,
data: struct{ MasterTaintKey, Version string }{
MasterTaintKey: "foo",
Version: "foo",
},
expected: true,
},
{
manifest: KubeDNSService,
data: struct{ DNSIP string }{
DNSIP: "foo",
},
expected: true,
},
{
manifest: CoreDNSConfigMap,
data: struct{ DNSDomain, ServiceCIDR string }{
DNSDomain: "foo",
ServiceCIDR: "foo",
},
expected: true,
},
}
for _, rt := range tests {
_, actual := kubeadmutil.ParseTemplate(rt.manifest, rt.data)
if (actual == nil) != rt.expected {
t.Errorf(
"failed CompileManifests:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
func TestGetDNSIP(t *testing.T) {
var tests = []struct {
svcSubnet, expectedDNSIP string
}{
{
svcSubnet: "10.96.0.0/12",
expectedDNSIP: "10.96.0.10",
},
{
svcSubnet: "10.87.116.64/26",
expectedDNSIP: "10.87.116.74",
},
}
for _, rt := range tests {
dnsIP, err := kubeadmconstants.GetDNSIP(rt.svcSubnet)
if err != nil {
t.Fatalf("couldn't get dnsIP : %v", err)
}
actualDNSIP := dnsIP.String()
if actualDNSIP != rt.expectedDNSIP {
t.Errorf(
"failed GetDNSIP\n\texpected: %s\n\t actual: %s",
rt.expectedDNSIP,
actualDNSIP,
)
}
}
}


@ -0,0 +1,347 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
const (
// v180AndAboveKubeDNSDeployment is the kube-dns Deployment manifest for Kubernetes v1.8 and above
v180AndAboveKubeDNSDeployment = `
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
spec:
# replicas: not specified here:
# 1. So that the Addon Manager does not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: {{ .ImageRepository }}/k8s-dns-kube-dns-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{ .DNSDomain }}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: {{ .ImageRepository }}/k8s-dns-dnsmasq-nanny-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-negcache
- --log-facility=-
- --server=/{{ .DNSDomain }}/{{ .DNSBindAddr }}#10053
- --server=/in-addr.arpa/{{ .DNSBindAddr }}#10053
- --server=/ip6.arpa/{{ .DNSBindAddr }}#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: {{ .ImageRepository }}/k8s-dns-sidecar-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,{{ .DNSProbeType }}
- --probe=dnsmasq,{{ .DNSProbeAddr }}:53,kubernetes.default.svc.{{ .DNSDomain }},5,{{ .DNSProbeType }}
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: {{ .MasterTaintKey }}
effect: NoSchedule
# TODO: Remove this affinity field as soon as we are using manifest lists
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/arch
operator: In
values:
- {{ .Arch }}
`
// KubeDNSService is the kube-dns Service manifest
KubeDNSService = `
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
name: kube-dns
namespace: kube-system
# Without this resourceVersion value, an update of the Service between versions will yield:
# Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
resourceVersion: "0"
spec:
clusterIP: {{ .DNSIP }}
ports:
- name: dns
port: 53
protocol: UDP
targetPort: 53
- name: dns-tcp
port: 53
protocol: TCP
targetPort: 53
selector:
k8s-app: kube-dns
`
// CoreDNSDeployment is the CoreDNS Deployment manifest
CoreDNSDeployment = `
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
spec:
replicas: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: {{ .MasterTaintKey }}
effect: NoSchedule
containers:
- name: coredns
image: coredns/coredns:{{ .Version }}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
`
// CoreDNSConfigMap is the CoreDNS ConfigMap manifest
CoreDNSConfigMap = `
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log
health
kubernetes {{ .DNSDomain }} {{ .ServiceCIDR }} {
pods insecure
}
prometheus
proxy . /etc/resolv.conf
cache 30
}
`
// CoreDNSClusterRole is the CoreDNS ClusterRole manifest
CoreDNSClusterRole = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
`
// CoreDNSClusterRoleBinding is the CoreDNS Clusterrolebinding manifest
CoreDNSClusterRoleBinding = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
`
// CoreDNSServiceAccount is the CoreDNS ServiceAccount manifest
CoreDNSServiceAccount = `
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
`
)


@ -0,0 +1,79 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version"
)
const (
kubeDNSv180AndAboveVersion = "1.14.5"
kubeDNSv190AndAboveVersion = "1.14.7"
kubeDNSProbeSRV = "SRV"
kubeDNSProbeA = "A"
coreDNSVersion = "1.0.1"
)
// GetDNSVersion returns the right kube-dns version for a specific k8s version
func GetDNSVersion(kubeVersion *version.Version, dns string) string {
// v1.8.0+ uses kube-dns 1.14.5
// v1.9.0+ uses kube-dns 1.14.7
// v1.9.0+ uses CoreDNS 1.0.1
// In the future, when the version is bumped at HEAD, add conditional logic to return the right versions
// Also, the version might be bumped for different k8s releases on the same branch
switch dns {
case kubeadmconstants.KubeDNS:
// return the kube-dns version
if kubeVersion.Major() == 1 && kubeVersion.Minor() >= 9 {
return kubeDNSv190AndAboveVersion
}
return kubeDNSv180AndAboveVersion
case kubeadmconstants.CoreDNS:
// return the CoreDNS version
return coreDNSVersion
default:
return kubeDNSv180AndAboveVersion
}
}
// GetKubeDNSProbeType returns the right kube-dns probe for a specific k8s version
func GetKubeDNSProbeType(kubeVersion *version.Version) string {
// v1.8.0 uses type A probes; v1.9.0+ uses type SRV probes
// In the future, when the kube-dns version is bumped at HEAD, add conditional logic to return the right probe type
// Also, the version might be bumped for different k8s releases on the same branch
if kubeVersion.Major() == 1 && kubeVersion.Minor() >= 9 {
return kubeDNSProbeSRV
}
return kubeDNSProbeA
}
// GetKubeDNSManifest returns the right kube-dns YAML manifest for a specific k8s version
func GetKubeDNSManifest(kubeVersion *version.Version) string {
// v1.8.0+ has only one known YAML manifest spec, just return that here
// In the future, when the kube-dns version is bumped at HEAD, add conditional logic to return the right manifest
return v180AndAboveKubeDNSDeployment
}
// GetCoreDNSManifest returns the right CoreDNS YAML manifest for a specific k8s version
func GetCoreDNSManifest(kubeVersion *version.Version) string {
// v1.9.0+ has only one known YAML manifest spec, just return that here
// In the future, when the CoreDNS version is bumped at HEAD, add conditional logic to return the right manifest
return CoreDNSDeployment
}


@ -0,0 +1,126 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
"testing"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version"
)
func TestGetKubeDNSVersion(t *testing.T) {
var tests = []struct {
k8sVersion, expected string
}{
{
k8sVersion: "v1.7.0",
expected: "1.14.5",
},
{
k8sVersion: "v1.7.1",
expected: "1.14.5",
},
{
k8sVersion: "v1.7.2",
expected: "1.14.5",
},
{
k8sVersion: "v1.7.3",
expected: "1.14.5",
},
{
k8sVersion: "v1.8.0-alpha.2",
expected: "1.14.5",
},
{
k8sVersion: "v1.8.0",
expected: "1.14.5",
},
{
k8sVersion: "v1.9.0",
expected: "1.14.7",
},
}
for _, rt := range tests {
k8sVersion, err := version.ParseSemantic(rt.k8sVersion)
if err != nil {
t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err)
}
actualDNSVersion := GetDNSVersion(k8sVersion, kubeadmconstants.KubeDNS)
if actualDNSVersion != rt.expected {
t.Errorf(
"failed GetDNSVersion:\n\texpected: %s\n\t actual: %s",
rt.expected,
actualDNSVersion,
)
}
}
}
func TestGetKubeDNSProbeType(t *testing.T) {
var tests = []struct {
k8sVersion, expected string
}{
{
k8sVersion: "v1.7.0",
expected: "A",
},
{
k8sVersion: "v1.7.1",
expected: "A",
},
{
k8sVersion: "v1.7.2",
expected: "A",
},
{
k8sVersion: "v1.7.3",
expected: "A",
},
{
k8sVersion: "v1.8.0-alpha.2",
expected: "A",
},
{
k8sVersion: "v1.8.0",
expected: "A",
},
{
k8sVersion: "v1.9.0",
expected: "SRV",
},
}
for _, rt := range tests {
k8sVersion, err := version.ParseSemantic(rt.k8sVersion)
if err != nil {
t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err)
}
actualDNSProbeType := GetKubeDNSProbeType(k8sVersion)
if actualDNSProbeType != rt.expected {
t.Errorf(
"failed GetKubeDNSProbeType:\n\texpected: %s\n\t actual: %s",
rt.expected,
actualDNSProbeType,
)
}
}
}


@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["proxy_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/config:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"manifests.go",
"proxy.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -0,0 +1,201 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
const (
// KubeProxyConfigMap18 is the proxy ConfigMap manifest for Kubernetes version 1.8
KubeProxyConfigMap18 = `
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-proxy
namespace: kube-system
labels:
app: kube-proxy
data:
kubeconfig.conf: |
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: {{ .MasterEndpoint }}
name: default
contexts:
- context:
cluster: default
namespace: default
user: default
name: default
current-context: default
users:
- name: default
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`
// KubeProxyConfigMap19 is the proxy ConfigMap manifest for Kubernetes 1.9 and above
KubeProxyConfigMap19 = `
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-proxy
namespace: kube-system
labels:
app: kube-proxy
data:
kubeconfig.conf: |-
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: {{ .MasterEndpoint }}
name: default
contexts:
- context:
cluster: default
namespace: default
user: default
name: default
current-context: default
users:
- name: default
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
config.conf: |-
{{ .ProxyConfig}}
`
// KubeProxyDaemonSet18 is the proxy DaemonSet manifest for Kubernetes version 1.8
KubeProxyDaemonSet18 = `
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
labels:
k8s-app: kube-proxy
name: kube-proxy
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-proxy
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-proxy
spec:
containers:
- name: kube-proxy
image: {{ if .ImageOverride }}{{ .ImageOverride }}{{ else }}{{ .ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}{{ end }}
imagePullPolicy: IfNotPresent
command:
- /usr/local/bin/kube-proxy
- --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
{{ .ClusterCIDR }}
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kube-proxy
name: kube-proxy
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
hostNetwork: true
serviceAccountName: kube-proxy
tolerations:
- key: {{ .MasterTaintKey }}
effect: NoSchedule
- key: {{ .CloudTaintKey }}
value: "true"
effect: NoSchedule
volumes:
- name: kube-proxy
configMap:
name: kube-proxy
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: lib-modules
hostPath:
path: /lib/modules
`
// KubeProxyDaemonSet19 is the proxy DaemonSet manifest for Kubernetes 1.9 and above
KubeProxyDaemonSet19 = `
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
labels:
k8s-app: kube-proxy
name: kube-proxy
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-proxy
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-proxy
spec:
containers:
- name: kube-proxy
image: {{ if .ImageOverride }}{{ .ImageOverride }}{{ else }}{{ .ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}{{ end }}
imagePullPolicy: IfNotPresent
command:
- /usr/local/bin/kube-proxy
- --config=/var/lib/kube-proxy/config.conf
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kube-proxy
name: kube-proxy
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
hostNetwork: true
serviceAccountName: kube-proxy
tolerations:
- key: {{ .MasterTaintKey }}
effect: NoSchedule
- key: {{ .CloudTaintKey }}
value: "true"
effect: NoSchedule
volumes:
- name: kube-proxy
configMap:
name: kube-proxy
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: lib-modules
hostPath:
path: /lib/modules
`
)


@ -0,0 +1,191 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"runtime"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kuberuntime "k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
kubeproxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme"
kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)
const (
// KubeProxyClusterRoleName sets the name for the kube-proxy ClusterRole
// TODO: This k8s-generic, well-known constant should be fetchable from another source, not be in this package
KubeProxyClusterRoleName = "system:node-proxier"
// KubeProxyServiceAccountName describes the name of the ServiceAccount for the kube-proxy addon
KubeProxyServiceAccountName = "kube-proxy"
)
// EnsureProxyAddon creates the kube-proxy addon
func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
if err := CreateServiceAccount(client); err != nil {
return fmt.Errorf("error when creating kube-proxy service account: %v", err)
}
// Get the master endpoint for the kube-proxy kubeconfig file
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
if err != nil {
return err
}
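// Marshal the kube-proxy ComponentConfig to YAML, shifted right so it can be nested under the ConfigMap's config.conf key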
proxyBytes, err := kubeadmutil.MarshalToYamlForCodecsWithShift(cfg.KubeProxy.Config, kubeproxyconfigv1alpha1.SchemeGroupVersion,
kubeproxyconfigscheme.Codecs)
if err != nil {
return fmt.Errorf("error when marshaling: %v", err)
}
// Parse the given kubernetes version
k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
if err != nil {
return fmt.Errorf("couldn't parse kubernetes version %q: %v", cfg.KubernetesVersion, err)
}
var proxyConfigMapBytes, proxyDaemonSetBytes []byte
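// Kubernetes v1.9+ consumes the kube-proxy ComponentConfig via config.conf; older versions rely on command-line flags only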
if k8sVersion.AtLeast(kubeadmconstants.MinimumKubeProxyComponentConfigVersion) {
proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
struct {
MasterEndpoint string
ProxyConfig string
}{
MasterEndpoint: masterEndpoint,
ProxyConfig: proxyBytes,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{
ImageRepository: cfg.GetControlPlaneImageRepository(),
Arch: runtime.GOARCH,
Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion),
ImageOverride: cfg.UnifiedControlPlaneImage,
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
CloudTaintKey: algorithm.TaintExternalCloudProvider,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
} else {
proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap18,
struct {
MasterEndpoint string
}{
MasterEndpoint: masterEndpoint,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet18, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{
ImageRepository: cfg.GetControlPlaneImageRepository(),
Arch: runtime.GOARCH,
Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion),
ImageOverride: cfg.UnifiedControlPlaneImage,
ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet),
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
CloudTaintKey: algorithm.TaintExternalCloudProvider,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
}
if err := createKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client); err != nil {
return err
}
if err := CreateRBACRules(client); err != nil {
return fmt.Errorf("error when creating kube-proxy RBAC rules: %v", err)
}
fmt.Println("[addons] Applied essential addon: kube-proxy")
return nil
}
// CreateServiceAccount creates the ServiceAccount for kube-proxy, if it doesn't already exist.
func CreateServiceAccount(client clientset.Interface) error {
return apiclient.CreateOrUpdateServiceAccount(client, &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: KubeProxyServiceAccountName,
Namespace: metav1.NamespaceSystem,
},
})
}
// CreateRBACRules creates the essential RBAC rules for a minimally set-up cluster
func CreateRBACRules(client clientset.Interface) error {
return createClusterRoleBindings(client)
}
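// createKubeProxyAddon decodes the rendered ConfigMap and DaemonSet manifests and applies them to the cluster.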
func createKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client clientset.Interface) error {
kubeproxyConfigMap := &v1.ConfigMap{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil {
return fmt.Errorf("unable to decode kube-proxy configmap %v", err)
}
// Create the ConfigMap for kube-proxy or update it in case it already exists
if err := apiclient.CreateOrUpdateConfigMap(client, kubeproxyConfigMap); err != nil {
return err
}
kubeproxyDaemonSet := &apps.DaemonSet{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil {
return fmt.Errorf("unable to decode kube-proxy daemonset %v", err)
}
// Create the DaemonSet for kube-proxy or update it in case it already exists
return apiclient.CreateOrUpdateDaemonSet(client, kubeproxyDaemonSet)
}
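// createClusterRoleBindings binds the kube-proxy ServiceAccount to the system:node-proxier ClusterRole.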
func createClusterRoleBindings(client clientset.Interface) error {
return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "kubeadm:node-proxier",
},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: "ClusterRole",
Name: KubeProxyClusterRoleName,
},
Subjects: []rbac.Subject{
{
Kind: rbac.ServiceAccountKind,
Name: KubeProxyServiceAccountName,
Namespace: metav1.NamespaceSystem,
},
},
})
}
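// getClusterCIDR returns a ready-made YAML list entry ("- --cluster-cidr=<subnet>") for the DaemonSet template, or an empty string when no pod subnet is set.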
func getClusterCIDR(podsubnet string) string {
if len(podsubnet) == 0 {
return ""
}
return "- --cluster-cidr=" + podsubnet
}


@ -0,0 +1,293 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"strings"
"testing"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientsetfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
api "k8s.io/kubernetes/pkg/apis/core"
kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
"k8s.io/kubernetes/pkg/util/pointer"
)
func TestCreateServiceAccount(t *testing.T) {
tests := []struct {
name string
createErr error
expectErr bool
}{
{
"error-free case",
nil,
false,
},
{
"duplication errors should be ignored",
apierrors.NewAlreadyExists(api.Resource(""), ""),
false,
},
{
"unexpected errors should be returned",
apierrors.NewUnauthorized(""),
true,
},
}
for _, tc := range tests {
client := clientsetfake.NewSimpleClientset()
if tc.createErr != nil {
client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tc.createErr
})
}
err := CreateServiceAccount(client)
if tc.expectErr {
if err == nil {
t.Errorf("CreateServiceAccounts(%s) wanted err, got nil", tc.name)
}
continue
} else if !tc.expectErr && err != nil {
t.Errorf("CreateServiceAccounts(%s) returned unexpected err: %v", tc.name, err)
}
wantResourcesCreated := 1
if len(client.Actions()) != wantResourcesCreated {
t.Errorf("CreateServiceAccounts(%s) should have made %d actions, but made %d", tc.name, wantResourcesCreated, len(client.Actions()))
}
for _, action := range client.Actions() {
if action.GetVerb() != "create" || action.GetResource().Resource != "serviceaccounts" {
t.Errorf("CreateServiceAccounts(%s) called [%v %v], but wanted [create serviceaccounts]",
tc.name, action.GetVerb(), action.GetResource().Resource)
}
}
}
}
func TestGetClusterCIDR(t *testing.T) {
emptyClusterCIDR := getClusterCIDR("")
if emptyClusterCIDR != "" {
t.Errorf("Invalid format: %s", emptyClusterCIDR)
}
clusterCIDR := getClusterCIDR("10.244.0.0/16")
if clusterCIDR != "- --cluster-cidr=10.244.0.0/16" {
t.Errorf("Invalid format: %s", clusterCIDR)
}
clusterIPv6CIDR := getClusterCIDR("2001:db8::/64")
if clusterIPv6CIDR != "- --cluster-cidr=2001:db8::/64" {
t.Errorf("Invalid format: %s", clusterIPv6CIDR)
}
}
func TestCompileManifests(t *testing.T) {
var tests = []struct {
manifest string
data interface{}
expected bool
}{
{
manifest: KubeProxyConfigMap18,
data: struct {
MasterEndpoint, ProxyConfig string
}{
MasterEndpoint: "foo",
},
expected: true,
},
{
manifest: KubeProxyConfigMap19,
data: struct {
MasterEndpoint, ProxyConfig string
}{
MasterEndpoint: "foo",
ProxyConfig: " bindAddress: 0.0.0.0\n clusterCIDR: 192.168.1.1\n enableProfiling: false",
},
expected: true,
},
{
manifest: KubeProxyDaemonSet18,
data: struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{
ImageRepository: "foo",
Arch: "foo",
Version: "foo",
ImageOverride: "foo",
ClusterCIDR: "foo",
MasterTaintKey: "foo",
CloudTaintKey: "foo",
},
expected: true,
},
{
manifest: KubeProxyDaemonSet19,
data: struct{ ImageRepository, Arch, Version, ImageOverride, MasterTaintKey, CloudTaintKey string }{
ImageRepository: "foo",
Arch: "foo",
Version: "foo",
ImageOverride: "foo",
MasterTaintKey: "foo",
CloudTaintKey: "foo",
},
expected: true,
},
}
for _, rt := range tests {
_, actual := kubeadmutil.ParseTemplate(rt.manifest, rt.data)
if (actual == nil) != rt.expected {
t.Errorf(
"failed to compile %s manifest:\n\texpected: %t\n\t actual: %t",
rt.manifest,
rt.expected,
(actual == nil),
)
}
}
}
func TestEnsureProxyAddon(t *testing.T) {
type SimulatedError int
const (
NoError SimulatedError = iota
ServiceAccountError
InvalidMasterEndpoint
IPv6SetBindAddress
)
var testCases = []struct {
name string
simError SimulatedError
expErrString string
expBindAddr string
expClusterCIDR string
}{
{
name: "Successful proxy addon",
simError: NoError,
expErrString: "",
expBindAddr: "0.0.0.0",
expClusterCIDR: "5.6.7.8/24",
}, {
name: "Simulated service account error",
simError: ServiceAccountError,
expErrString: "error when creating kube-proxy service account",
expBindAddr: "0.0.0.0",
expClusterCIDR: "5.6.7.8/24",
}, {
name: "IPv6 AdvertiseAddress address",
simError: IPv6SetBindAddress,
expErrString: "",
expBindAddr: "::",
expClusterCIDR: "2001:101::/96",
},
}
for _, tc := range testCases {
// Create a fake client and set up default test configuration
client := clientsetfake.NewSimpleClientset()
masterConfig := &kubeadmapiext.MasterConfiguration{
API: kubeadmapiext.API{
AdvertiseAddress: "1.2.3.4",
BindPort: 1234,
},
KubeProxy: kubeadmapiext.KubeProxy{
Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{
BindAddress: "",
HealthzBindAddress: "0.0.0.0:10256",
MetricsBindAddress: "127.0.0.1:10249",
Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
},
Networking: kubeadmapiext.Networking{
PodSubnet: "5.6.7.8/24",
},
ImageRepository: "someRepo",
KubernetesVersion: "v1.9.0",
UnifiedControlPlaneImage: "someImage",
}
// Simulate an error if necessary
switch tc.simError {
case ServiceAccountError:
client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewUnauthorized("")
})
case InvalidMasterEndpoint:
masterConfig.API.AdvertiseAddress = "1.2.3"
case IPv6SetBindAddress:
masterConfig.API.AdvertiseAddress = "1:2::3:4"
masterConfig.Networking.PodSubnet = "2001:101::/96"
}
kubeadmapiext.SetDefaults_MasterConfiguration(masterConfig)
intMaster, err := cmdutil.ConfigFileAndDefaultsToInternalConfig("", masterConfig)
if err != nil {
t.Errorf(" test failed to convert v1alpha1 to internal version")
break
}
err = EnsureProxyAddon(intMaster, client)
// Compare actual to expected errors
actErr := "No error"
if err != nil {
actErr = err.Error()
}
expErr := "No error"
if tc.expErrString != "" {
expErr = tc.expErrString
}
if !strings.Contains(actErr, expErr) {
t.Errorf(
"%s test failed, expected: %s, got: %s",
tc.name,
expErr,
actErr)
}
if intMaster.KubeProxy.Config.BindAddress != tc.expBindAddr {
t.Errorf("%s test failed, expected: %s, got: %s",
tc.name,
tc.expBindAddr,
intMaster.KubeProxy.Config.BindAddress)
}
if intMaster.KubeProxy.Config.ClusterCIDR != tc.expClusterCIDR {
t.Errorf("%s test failed, expected: %s, got: %s",
tc.name,
tc.expClusterCIDR,
intMaster.KubeProxy.Config.ClusterCIDR)
}
}
}


@ -0,0 +1,52 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["clusterinfo_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo",
library = ":go_default_library",
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["clusterinfo.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo",
deps = [
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/bootstrap/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -0,0 +1,105 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterinfo
import (
"fmt"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
)
const (
// BootstrapSignerClusterRoleName sets the name for the Role that allows access to ConfigMaps in the kube-public namespace
BootstrapSignerClusterRoleName = "kubeadm:bootstrap-signer-clusterinfo"
)
// CreateBootstrapConfigMapIfNotExists creates the cluster-info ConfigMap in the kube-public namespace if it doesn't exist already
func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string) error {
fmt.Printf("[bootstraptoken] Creating the %q ConfigMap in the %q namespace\n", bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic)
adminConfig, err := clientcmd.LoadFromFile(file)
if err != nil {
return fmt.Errorf("failed to load admin kubeconfig [%v]", err)
}
adminCluster := adminConfig.Contexts[adminConfig.CurrentContext].Cluster
// Copy the cluster from admin.conf to the bootstrap kubeconfig; it contains the CA cert and the server URL
bootstrapConfig := &clientcmdapi.Config{
Clusters: map[string]*clientcmdapi.Cluster{
"": adminConfig.Clusters[adminCluster],
},
}
bootstrapBytes, err := clientcmd.Write(*bootstrapConfig)
if err != nil {
return err
}
// Create or update the ConfigMap in the kube-public namespace
return apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: bootstrapapi.ConfigMapClusterInfo,
Namespace: metav1.NamespacePublic,
},
Data: map[string]string{
bootstrapapi.KubeConfigKey: string(bootstrapBytes),
},
})
}
// CreateClusterInfoRBACRules creates the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace to unauthenticated users
func CreateClusterInfoRBACRules(client clientset.Interface) error {
err := apiclient.CreateOrUpdateRole(client, &rbac.Role{
ObjectMeta: metav1.ObjectMeta{
Name: BootstrapSignerClusterRoleName,
Namespace: metav1.NamespacePublic,
},
Rules: []rbac.PolicyRule{
rbachelper.NewRule("get").Groups("").Resources("configmaps").Names(bootstrapapi.ConfigMapClusterInfo).RuleOrDie(),
},
})
if err != nil {
return err
}
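// Bind the Role to the anonymous user so unauthenticated clients can read the cluster-info ConfigMap during TLS bootstrap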
return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: BootstrapSignerClusterRoleName,
Namespace: metav1.NamespacePublic,
},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: "Role",
Name: BootstrapSignerClusterRoleName,
},
Subjects: []rbac.Subject{
{
Kind: rbac.UserKind,
Name: user.Anonymous,
},
},
})
}


@ -0,0 +1,113 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterinfo
import (
"io/ioutil"
"os"
"testing"
"text/template"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
clientsetfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
)
var testConfigTempl = template.Must(template.New("test").Parse(`apiVersion: v1
clusters:
- cluster:
server: {{.Server}}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin`))
func TestCreateBootstrapConfigMapIfNotExists(t *testing.T) {
tests := []struct {
name string
createErr error
updateErr error
expectErr bool
}{
{
"successful case should have no error",
nil,
nil,
false,
},
{
"if both create and update errors, return error",
apierrors.NewAlreadyExists(api.Resource("configmaps"), "test"),
apierrors.NewUnauthorized("go away!"),
true,
},
{
"unexpected error should be returned",
apierrors.NewUnauthorized("go away!"),
nil,
true,
},
}
servers := []struct {
Server string
}{
{Server: "https://10.128.0.6:6443"},
{Server: "https://[2001:db8::6]:3446"},
}
for _, server := range servers {
file, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("could not create tempfile: %v", err)
}
defer os.Remove(file.Name())
if err := testConfigTempl.Execute(file, server); err != nil {
t.Fatalf("could not write to tempfile: %v", err)
}
if err := file.Close(); err != nil {
t.Fatalf("could not close tempfile: %v", err)
}
for _, tc := range tests {
client := clientsetfake.NewSimpleClientset()
if tc.createErr != nil {
client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tc.createErr
})
}
err := CreateBootstrapConfigMapIfNotExists(client, file.Name())
if tc.expectErr && err == nil {
t.Errorf("CreateBootstrapConfigMapIfNotExists(%s) wanted error, got nil", tc.name)
} else if !tc.expectErr && err != nil {
t.Errorf("CreateBootstrapConfigMapIfNotExists(%s) returned unexpected error: %v", tc.name, err)
}
}
}
}


@ -0,0 +1,48 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["token_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node",
library = ":go_default_library",
deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = [
"tlsbootstrap.go",
"token.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node",
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/token:go_default_library",
"//pkg/bootstrap/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,113 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
const (
// NodeBootstrapperClusterRoleName defines the name of the auto-bootstrapped ClusterRole for letting someone post a CSR
// TODO: This value should be defined in another, generic authz package instead of here
NodeBootstrapperClusterRoleName = "system:node-bootstrapper"
// NodeKubeletBootstrap defines the name of the ClusterRoleBinding that lets kubelets post CSRs
NodeKubeletBootstrap = "kubeadm:kubelet-bootstrap"
// CSRAutoApprovalClusterRoleName defines the name of the auto-bootstrapped ClusterRole for making the csrapprover controller auto-approve the CSR
// TODO: This value should be defined in another, generic authz package instead of here
// Starting from v1.8, CSRAutoApprovalClusterRoleName is automatically created by the API server on startup
CSRAutoApprovalClusterRoleName = "system:certificates.k8s.io:certificatesigningrequests:nodeclient"
// NodeSelfCSRAutoApprovalClusterRoleName is a role defined in default 1.8 RBAC policies for automatic CSR approvals for automatically rotated node certificates
NodeSelfCSRAutoApprovalClusterRoleName = "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient"
// NodeAutoApproveBootstrapClusterRoleBinding defines the name of the ClusterRoleBinding that makes the csrapprover approve node CSRs
NodeAutoApproveBootstrapClusterRoleBinding = "kubeadm:node-autoapprove-bootstrap"
// NodeAutoApproveCertificateRotationClusterRoleBinding defines name of the ClusterRoleBinding that makes the csrapprover approve node auto rotated CSRs
NodeAutoApproveCertificateRotationClusterRoleBinding = "kubeadm:node-autoapprove-certificate-rotation"
)
// AllowBootstrapTokensToPostCSRs creates RBAC rules in a way that makes Node Bootstrap Tokens able to post CSRs
func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
fmt.Println("[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials")
return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: NodeKubeletBootstrap,
},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: "ClusterRole",
Name: NodeBootstrapperClusterRoleName,
},
Subjects: []rbac.Subject{
{
Kind: rbac.GroupKind,
Name: constants.NodeBootstrapTokenAuthGroup,
},
},
})
}
// AutoApproveNodeBootstrapTokens creates RBAC rules in a way that makes Node Bootstrap Tokens' CSRs auto-approved by the csrapprover controller
func AutoApproveNodeBootstrapTokens(client clientset.Interface) error {
fmt.Println("[bootstraptoken] Configured RBAC rules to allow the csrapprover controller to automatically approve CSRs from a Node Bootstrap Token")
// Always create this kubeadm-specific binding though
return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: NodeAutoApproveBootstrapClusterRoleBinding,
},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: "ClusterRole",
Name: CSRAutoApprovalClusterRoleName,
},
Subjects: []rbac.Subject{
{
Kind: "Group",
Name: constants.NodeBootstrapTokenAuthGroup,
},
},
})
}
// AutoApproveNodeCertificateRotation creates RBAC rules in a way that makes Node certificate rotation CSRs auto-approved by the csrapprover controller
func AutoApproveNodeCertificateRotation(client clientset.Interface) error {
fmt.Println("[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster")
return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: NodeAutoApproveCertificateRotationClusterRoleBinding,
},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: "ClusterRole",
Name: NodeSelfCSRAutoApprovalClusterRoleName,
},
Subjects: []rbac.Subject{
{
Kind: "Group",
Name: constants.NodesGroup,
},
},
})
}
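A minimal wiring sketch for the three helpers above (not part of this commit; it uses a fake clientset so the snippet stays self-contained, whereas kubeadm passes a client built from the admin kubeconfig):
package main

import (
	"log"

	clientset "k8s.io/client-go/kubernetes"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	node "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
)

func main() {
	// A fake clientset keeps the example runnable without a cluster.
	var client clientset.Interface = clientsetfake.NewSimpleClientset()
	steps := []func(clientset.Interface) error{
		node.AllowBootstrapTokensToPostCSRs,
		node.AutoApproveNodeBootstrapTokens,
		node.AutoApproveNodeCertificateRotation,
	}
	for _, step := range steps {
		if err := step(client); err != nil {
			log.Fatalf("bootstrap token RBAC setup failed: %v", err)
		}
	}
}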

View File

@ -0,0 +1,125 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
)
const tokenCreateRetries = 5
// TODO(mattmoyer): Move CreateNewToken, UpdateOrCreateToken and encodeTokenSecretData out of this package to client-go for a generic abstraction and client for a Bootstrap Token
// CreateNewToken tries to create a token and fails if one with the same ID already exists
func CreateNewToken(client clientset.Interface, token string, tokenDuration time.Duration, usages []string, extraGroups []string, description string) error {
return UpdateOrCreateToken(client, token, true, tokenDuration, usages, extraGroups, description)
}
// UpdateOrCreateToken attempts to update a token with the given ID, or create it if it does not already exist.
func UpdateOrCreateToken(client clientset.Interface, token string, failIfExists bool, tokenDuration time.Duration, usages []string, extraGroups []string, description string) error {
tokenID, tokenSecret, err := tokenutil.ParseToken(token)
if err != nil {
return err
}
secretName := fmt.Sprintf("%s%s", bootstrapapi.BootstrapTokenSecretPrefix, tokenID)
var lastErr error
for i := 0; i < tokenCreateRetries; i++ {
secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
if err == nil {
if failIfExists {
return fmt.Errorf("a token with id %q already exists", tokenID)
}
// Secret with this ID already exists, update it:
tokenSecretData, err := encodeTokenSecretData(tokenID, tokenSecret, tokenDuration, usages, extraGroups, description)
if err != nil {
return err
}
secret.Data = tokenSecretData
if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Update(secret); err == nil {
return nil
}
lastErr = err
continue
}
// Secret does not already exist:
if apierrors.IsNotFound(err) {
tokenSecretData, err := encodeTokenSecretData(tokenID, tokenSecret, tokenDuration, usages, extraGroups, description)
if err != nil {
return err
}
secret = &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
},
Type: v1.SecretType(bootstrapapi.SecretTypeBootstrapToken),
Data: tokenSecretData,
}
if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err == nil {
return nil
}
lastErr = err
continue
}
}
return fmt.Errorf(
"unable to create bootstrap token after %d attempts [%v]",
tokenCreateRetries,
lastErr,
)
}
// encodeTokenSecretData takes the token ID and secret, plus an optional duration, and returns the .Data for the bootstrap token Secret
func encodeTokenSecretData(tokenID, tokenSecret string, duration time.Duration, usages []string, extraGroups []string, description string) (map[string][]byte, error) {
data := map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(tokenSecret),
}
if len(extraGroups) > 0 {
data[bootstrapapi.BootstrapTokenExtraGroupsKey] = []byte(strings.Join(extraGroups, ","))
}
if duration > 0 {
// Get the current time, add the specified duration, and format it accordingly
durationString := time.Now().Add(duration).Format(time.RFC3339)
data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(durationString)
}
if len(description) > 0 {
data[bootstrapapi.BootstrapTokenDescriptionKey] = []byte(description)
}
// validate usages
if err := bootstrapapi.ValidateUsages(usages); err != nil {
return nil, err
}
for _, usage := range usages {
data[bootstrapapi.BootstrapTokenUsagePrefix+usage] = []byte("true")
}
return data, nil
}
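A short usage sketch for the token helpers above (not part of this commit; the token value, duration, and description are made up, and a fake clientset stands in for a real cluster connection):
package main

import (
	"log"
	"time"

	clientsetfake "k8s.io/client-go/kubernetes/fake"
	node "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
)

func main() {
	client := clientsetfake.NewSimpleClientset()
	// Bootstrap tokens have the form "<6-char id>.<16-char secret>"; this one is invented.
	token := "abcdef.0123456789abcdef"
	err := node.CreateNewToken(client, token, 24*time.Hour,
		[]string{"signing", "authentication"}, // stored as usage-bootstrap-* keys in the Secret
		nil, // no extra auth groups
		"example token, for illustration only")
	if err != nil {
		log.Fatalf("creating bootstrap token: %v", err)
	}
	// The result is a Secret named "bootstrap-token-abcdef" in kube-system, whose data
	// (token-id, token-secret, expiration, usages) is produced by encodeTokenSecretData.
}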

View File

@ -0,0 +1,59 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"bytes"
"testing"
"time"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
func TestEncodeTokenSecretData(t *testing.T) {
var tests = []struct {
token *kubeadmapi.TokenDiscovery
t time.Duration
}{
{token: &kubeadmapi.TokenDiscovery{ID: "foo", Secret: "bar"}}, // should use default
{token: &kubeadmapi.TokenDiscovery{ID: "foo", Secret: "bar"}, t: time.Second}, // should use default
}
for _, rt := range tests {
actual, _ := encodeTokenSecretData(rt.token.ID, rt.token.Secret, rt.t, []string{}, []string{}, "")
if !bytes.Equal(actual["token-id"], []byte(rt.token.ID)) {
t.Errorf(
"failed EncodeTokenSecretData:\n\texpected: %s\n\t actual: %s",
rt.token.ID,
actual["token-id"],
)
}
if !bytes.Equal(actual["token-secret"], []byte(rt.token.Secret)) {
t.Errorf(
"failed EncodeTokenSecretData:\n\texpected: %s\n\t actual: %s",
rt.token.Secret,
actual["token-secret"],
)
}
if rt.t > 0 {
if actual["expiration"] == nil {
t.Errorf(
"failed EncodeTokenSecretData, duration was not added to time",
)
}
}
}
}

View File

@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["certs_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//cmd/kubeadm/test/certs:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"certs.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kubeadm/app/phases/certs/pkiutil:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -0,0 +1,552 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certs
import (
"crypto/rsa"
"crypto/x509"
"fmt"
"net"
"os"
"path/filepath"
"k8s.io/apimachinery/pkg/util/validation"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane.
// If the PKI assets already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error {
certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
CreateCACertAndKeyfiles,
CreateAPIServerCertAndKeyFiles,
CreateAPIServerKubeletClientCertAndKeyFiles,
CreateServiceAccountKeyAndPublicKeyFiles,
CreateFrontProxyCACertAndKeyFiles,
CreateFrontProxyClientCertAndKeyFiles,
}
for _, action := range certActions {
err := action(cfg)
if err != nil {
return err
}
}
fmt.Printf("[certificates] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir)
return nil
}
// CreateCACertAndKeyfiles creates a new self-signed CA certificate and key files.
// If the CA certificate and key files already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
func CreateCACertAndKeyfiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := NewCACertAndKey()
if err != nil {
return err
}
return writeCertificateAuthorithyFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.CACertAndKeyBaseName,
caCert,
caKey,
)
}
// CreateAPIServerCertAndKeyFiles creates a new certificate and key for the API server.
// If the apiserver certificate and key files already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key files already exist in the CertificatesDir.
func CreateAPIServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthorithy(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
apiCert, apiKey, err := NewAPIServerCertAndKey(cfg, caCert, caKey)
if err != nil {
return err
}
return writeCertificateFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.APIServerCertAndKeyBaseName,
caCert,
apiCert,
apiKey,
)
}
// CreateAPIServerKubeletClientCertAndKeyFiles creates a new client certificate and key for the API server to connect to kubelets.
// If the apiserver-kubelet-client certificate and key files already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key files already exist in the CertificatesDir.
func CreateAPIServerKubeletClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthorithy(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
apiClientCert, apiClientKey, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey)
if err != nil {
return err
}
return writeCertificateFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName,
caCert,
apiClientCert,
apiClientKey,
)
}
// CreateServiceAccountKeyAndPublicKeyFiles creates a new public/private key pair for signing service account tokens.
// If the sa public/private key files already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
saSigningKey, err := NewServiceAccountSigningKey()
if err != nil {
return err
}
return writeKeyFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.ServiceAccountKeyBaseName,
saSigningKey,
)
}
// CreateFrontProxyCACertAndKeyFiles creates a self-signed front proxy CA certificate and key files.
// Front proxy CA and client certs are used to secure a front proxy authenticator which is used to assert identity
// without the client cert; this is a separate CA, so that front proxy identities cannot hit the API and normal client certs cannot be used
// as front proxies.
// If the front proxy CA certificate and key files already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
func CreateFrontProxyCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
frontProxyCACert, frontProxyCAKey, err := NewFrontProxyCACertAndKey()
if err != nil {
return err
}
return writeCertificateAuthorithyFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.FrontProxyCACertAndKeyBaseName,
frontProxyCACert,
frontProxyCAKey,
)
}
// CreateFrontProxyClientCertAndKeyFiles creates a new certificate and key for the front proxy client.
// If the front-proxy-client certificate and key files already exist in the target folder, they are used only if they evaluate as equal; otherwise an error is returned.
// It assumes the front proxy CA certificate and key files already exist in the CertificatesDir.
func CreateFrontProxyClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
frontProxyCACert, frontProxyCAKey, err := loadCertificateAuthorithy(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName)
if err != nil {
return err
}
frontProxyClientCert, frontProxyClientKey, err := NewFrontProxyClientCertAndKey(frontProxyCACert, frontProxyCAKey)
if err != nil {
return err
}
return writeCertificateFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.FrontProxyClientCertAndKeyBaseName,
frontProxyCACert,
frontProxyClientCert,
frontProxyClientKey,
)
}
// NewCACertAndKey will generate a self signed CA.
func NewCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
caCert, caKey, err := pkiutil.NewCertificateAuthority()
if err != nil {
return nil, nil, fmt.Errorf("failure while generating CA certificate and key: %v", err)
}
return caCert, caKey, nil
}
// NewAPIServerCertAndKey generates a new certificate and key for the API server, signed by the given CA.
func NewAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
altNames, err := getAltNames(cfg)
if err != nil {
return nil, nil, fmt.Errorf("failure while composing altnames for API server: %v", err)
}
config := certutil.Config{
CommonName: kubeadmconstants.APIServerCertCommonName,
AltNames: *altNames,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
apiCert, apiKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
if err != nil {
return nil, nil, fmt.Errorf("failure while creating API server key and certificate: %v", err)
}
return apiCert, apiKey, nil
}
// NewAPIServerKubeletClientCertAndKey generates a new client certificate and key for the API server to connect to the kubelets securely, signed by the given CA.
func NewAPIServerKubeletClientCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
config := certutil.Config{
CommonName: kubeadmconstants.APIServerKubeletClientCertCommonName,
Organization: []string{kubeadmconstants.MastersGroup},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
if err != nil {
return nil, nil, fmt.Errorf("failure while creating API server kubelet client key and certificate: %v", err)
}
return apiClientCert, apiClientKey, nil
}
// NewServiceAccountSigningKey generates a public/private key pair for signing service account tokens.
func NewServiceAccountSigningKey() (*rsa.PrivateKey, error) {
// The key does NOT exist, let's generate it now
saSigningKey, err := certutil.NewPrivateKey()
if err != nil {
return nil, fmt.Errorf("failure while creating service account token signing key: %v", err)
}
return saSigningKey, nil
}
// NewFrontProxyCACertAndKey generates a self-signed front proxy CA certificate and key.
func NewFrontProxyCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
frontProxyCACert, frontProxyCAKey, err := pkiutil.NewCertificateAuthority()
if err != nil {
return nil, nil, fmt.Errorf("failure while generating front-proxy CA certificate and key: %v", err)
}
return frontProxyCACert, frontProxyCAKey, nil
}
// NewFrontProxyClientCertAndKey generates a new client certificate and key for the front proxy, signed by the given front proxy CA.
func NewFrontProxyClientCertAndKey(frontProxyCACert *x509.Certificate, frontProxyCAKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
config := certutil.Config{
CommonName: kubeadmconstants.FrontProxyClientCertCommonName,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
frontProxyClientCert, frontProxyClientKey, err := pkiutil.NewCertAndKey(frontProxyCACert, frontProxyCAKey, config)
if err != nil {
return nil, nil, fmt.Errorf("failure while creating front-proxy client key and certificate: %v", err)
}
return frontProxyClientCert, frontProxyClientKey, nil
}
// loadCertificateAuthorithy loads the certificate authority with the given base name from the PKI directory
func loadCertificateAuthorithy(pkiDir string, baseName string) (*x509.Certificate, *rsa.PrivateKey, error) {
// Checks if the certificate authority exists in the PKI directory
if !pkiutil.CertOrKeyExist(pkiDir, baseName) {
return nil, nil, fmt.Errorf("couldn't load %s certificate authority from %s", baseName, pkiDir)
}
// Try to load the certificate authority .crt and .key from the PKI directory
caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName)
if err != nil {
return nil, nil, fmt.Errorf("failure loading %s certificate authority: %v", baseName, err)
}
// Make sure the loaded CA cert actually is a CA
if !caCert.IsCA {
return nil, nil, fmt.Errorf("%s certificate is not a certificate authority", baseName)
}
return caCert, caKey, nil
}
// writeCertificateAuthorithyFilesIfNotExist writes a new certificate authority to the given path.
// If there already is a certificate file at the given path, kubeadm tries to load it and checks whether the values in the
// existing and the expected certificate are equal. If they are, kubeadm just skips writing the file as it's up to date;
// otherwise this function returns an error.
func writeCertificateAuthorithyFilesIfNotExist(pkiDir string, baseName string, caCert *x509.Certificate, caKey *rsa.PrivateKey) error {
// If cert or key exists, we should try to load them
if pkiutil.CertOrKeyExist(pkiDir, baseName) {
// Try to load .crt and .key from the PKI directory
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName)
if err != nil {
return fmt.Errorf("failure loading %s certificate: %v", baseName, err)
}
// Check if the existing cert is a CA
if !caCert.IsCA {
return fmt.Errorf("certificate %s is not a CA", baseName)
}
// kubeadm doesn't validate the existing certificate authority more than this;
// basically, if there is a certificate file at the same path and it is a CA,
// kubeadm treats those files as equal and doesn't bother writing a new file
fmt.Printf("[certificates] Using the existing %s certificate and key.\n", baseName)
} else {
// Write .crt and .key files to disk
if err := pkiutil.WriteCertAndKey(pkiDir, baseName, caCert, caKey); err != nil {
return fmt.Errorf("failure while saving %s certificate and key: %v", baseName, err)
}
fmt.Printf("[certificates] Generated %s certificate and key.\n", baseName)
}
return nil
}
// writeCertificateFilesIfNotExist writes a new certificate to the given path.
// If there already is a certificate file at the given path, kubeadm tries to load it and checks whether the values in the
// existing and the expected certificate are equal. If they are, kubeadm just skips writing the file as it's up to date;
// otherwise this function returns an error.
func writeCertificateFilesIfNotExist(pkiDir string, baseName string, signingCert *x509.Certificate, cert *x509.Certificate, key *rsa.PrivateKey) error {
// Checks if the signed certificate exists in the PKI directory
if pkiutil.CertOrKeyExist(pkiDir, baseName) {
// Try to load signed certificate .crt and .key from the PKI directory
signedCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName)
if err != nil {
return fmt.Errorf("failure loading %s certificate: %v", baseName, err)
}
// Check if the existing cert is signed by the given CA
if err := signedCert.CheckSignatureFrom(signingCert); err != nil {
return fmt.Errorf("certificate %s is not signed by corresponding CA", baseName)
}
// kubeadm doesn't validate the existing certificate more than this;
// basically, if there is a certificate file at the same path and it is signed by
// the expected certificate authority, kubeadm treats those files as equal and
// doesn't bother writing a new file
fmt.Printf("[certificates] Using the existing %s certificate and key.\n", baseName)
} else {
// Write .crt and .key files to disk
if err := pkiutil.WriteCertAndKey(pkiDir, baseName, cert, key); err != nil {
return fmt.Errorf("failure while saving %s certificate and key: %v", baseName, err)
}
fmt.Printf("[certificates] Generated %s certificate and key.\n", baseName)
if pkiutil.HasServerAuth(cert) {
fmt.Printf("[certificates] %s serving cert is signed for DNS names %v and IPs %v\n", baseName, cert.DNSNames, cert.IPAddresses)
}
}
return nil
}
// writeKeyFilesIfNotExist writes a new key to the given path.
// If there already is a key file at the given path, kubeadm tries to load it and checks whether the values in the
// existing and the expected key are equal. If they are, kubeadm just skips writing the file as it's up to date;
// otherwise this function returns an error.
func writeKeyFilesIfNotExist(pkiDir string, baseName string, key *rsa.PrivateKey) error {
// Checks if the key exists in the PKI directory
if pkiutil.CertOrKeyExist(pkiDir, baseName) {
// Try to load .key from the PKI directory
_, err := pkiutil.TryLoadKeyFromDisk(pkiDir, baseName)
if err != nil {
return fmt.Errorf("%s key existed but it could not be loaded properly: %v", baseName, err)
}
// kubeadm doesn't validate the existing key more than this;
// basically, if it finds a key file at the same path, kubeadm treats those files
// are equal and doesn't bother writing a new file
fmt.Printf("[certificates] Using the existing %s key.\n", baseName)
} else {
// Write .key and .pub files to disk
if err := pkiutil.WriteKey(pkiDir, baseName, key); err != nil {
return fmt.Errorf("failure while saving %s key: %v", baseName, err)
}
if err := pkiutil.WritePublicKey(pkiDir, baseName, &key.PublicKey); err != nil {
return fmt.Errorf("failure while saving %s public key: %v", baseName, err)
}
fmt.Printf("[certificates] Generated %s key and public key.\n", baseName)
}
return nil
}
type certKeyLocation struct {
pkiDir string
caBaseName string
baseName string
uxName string
}
// UsingExternalCA determines whether the user is relying on an external CA. We currently implicitly determine this is the case when the CA Cert
// is present but the CA Key is not. This allows us to, e.g., skip generating certs or not start the csr signing controller.
func UsingExternalCA(cfg *kubeadmapi.MasterConfiguration) (bool, error) {
if err := validateCACert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, "", "CA"}); err != nil {
return false, err
}
caKeyPath := filepath.Join(cfg.CertificatesDir, kubeadmconstants.CAKeyName)
if _, err := os.Stat(caKeyPath); !os.IsNotExist(err) {
return false, fmt.Errorf("ca.key exists")
}
if err := validateSignedCert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, kubeadmconstants.APIServerCertAndKeyBaseName, "API server"}); err != nil {
return false, err
}
if err := validateSignedCert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, "API server kubelet client"}); err != nil {
return false, err
}
if err := validatePrivatePublicKey(certKeyLocation{cfg.CertificatesDir, "", kubeadmconstants.ServiceAccountKeyBaseName, "service account"}); err != nil {
return false, err
}
if err := validateCACertAndKey(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName, "", "front-proxy CA"}); err != nil {
return false, err
}
if err := validateSignedCert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName, kubeadmconstants.FrontProxyClientCertAndKeyBaseName, "front-proxy client"}); err != nil {
return false, err
}
return true, nil
}
// validateCACert tries to load an x509 certificate from pkiDir and validates that it is a CA
func validateCACert(l certKeyLocation) error {
// Check CA Cert
caCert, err := pkiutil.TryLoadCertFromDisk(l.pkiDir, l.caBaseName)
if err != nil {
return fmt.Errorf("failure loading certificate for %s: %v", l.uxName, err)
}
// Check if cert is a CA
if !caCert.IsCA {
return fmt.Errorf("certificate %s is not a CA", l.uxName)
}
return nil
}
// validateCACertAndKey tries to load an x509 certificate and private key from pkiDir,
// and validates that the cert is a CA
func validateCACertAndKey(l certKeyLocation) error {
if err := validateCACert(l); err != nil {
return err
}
_, err := pkiutil.TryLoadKeyFromDisk(l.pkiDir, l.caBaseName)
if err != nil {
return fmt.Errorf("failure loading key for %s: %v", l.uxName, err)
}
return nil
}
// validateSignedCert tries to load an x509 certificate and private key from pkiDir and validates
// that the cert is signed by a given CA
func validateSignedCert(l certKeyLocation) error {
// Try to load CA
caCert, err := pkiutil.TryLoadCertFromDisk(l.pkiDir, l.caBaseName)
if err != nil {
return fmt.Errorf("failure loading certificate authorithy for %s: %v", l.uxName, err)
}
// Try to load key and signed certificate
signedCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(l.pkiDir, l.baseName)
if err != nil {
return fmt.Errorf("failure loading certificate for %s: %v", l.uxName, err)
}
// Check if the cert is signed by the CA
if err := signedCert.CheckSignatureFrom(caCert); err != nil {
return fmt.Errorf("certificate %s is not signed by corresponding CA", l.uxName)
}
return nil
}
// validatePrivatePublicKey tries to load a private/public key pair from pkiDir
func validatePrivatePublicKey(l certKeyLocation) error {
// Try to load key
_, _, err := pkiutil.TryLoadPrivatePublicKeyFromDisk(l.pkiDir, l.baseName)
if err != nil {
return fmt.Errorf("failure loading key for %s: %v", l.uxName, err)
}
return nil
}
// getAltNames builds an AltNames object to be used when generating the apiserver certificate
func getAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) {
// advertise address
advertiseAddress := net.ParseIP(cfg.API.AdvertiseAddress)
if advertiseAddress == nil {
return nil, fmt.Errorf("error parsing API AdvertiseAddress %v: is not a valid textual representation of an IP address", cfg.API.AdvertiseAddress)
}
// internal IP address for the API server
_, svcSubnet, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
if err != nil {
return nil, fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err)
}
internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(svcSubnet, 1)
if err != nil {
return nil, fmt.Errorf("unable to get first IP address from the given CIDR (%s): %v", svcSubnet.String(), err)
}
// create AltNames with the default DNSNames/IPs
altNames := &certutil.AltNames{
DNSNames: []string{
cfg.NodeName,
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
fmt.Sprintf("kubernetes.default.svc.%s", cfg.Networking.DNSDomain),
},
IPs: []net.IP{
internalAPIServerVirtualIP,
advertiseAddress,
},
}
// add any additional SANs
for _, altname := range cfg.APIServerCertSANs {
if ip := net.ParseIP(altname); ip != nil {
altNames.IPs = append(altNames.IPs, ip)
} else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
altNames.DNSNames = append(altNames.DNSNames, altname)
}
}
return altNames, nil
}

View File

@ -0,0 +1,615 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certs
import (
"crypto/rsa"
"crypto/x509"
"fmt"
"net"
"os"
"path/filepath"
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs"
)
func TestWriteCertificateAuthorithyFilesIfNotExist(t *testing.T) {
setupCert, setupKey, _ := NewCACertAndKey()
caCert, caKey, _ := NewCACertAndKey()
var tests = []struct {
setupFunc func(pkiDir string) error
expectedError bool
expectedCa *x509.Certificate
}{
{ // ca cert does not exist > ca written
expectedCa: caCert,
},
{ // ca cert exists, is ca > existing ca used
setupFunc: func(pkiDir string) error {
return writeCertificateAuthorithyFilesIfNotExist(pkiDir, "dummy", setupCert, setupKey)
},
expectedCa: setupCert,
},
{ // some file exists, but it is not a valid ca cert > err
setupFunc: func(pkiDir string) error {
testutil.SetupEmptyFiles(t, pkiDir, "dummy.crt")
return nil
},
expectedError: true,
},
{ // cert exists, but it is not a ca > err
setupFunc: func(pkiDir string) error {
cert, key, _ := NewFrontProxyClientCertAndKey(setupCert, setupKey)
return writeCertificateFilesIfNotExist(pkiDir, "dummy", setupCert, cert, key)
},
expectedError: true,
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// executes setup func (if necessary)
if test.setupFunc != nil {
if err := test.setupFunc(tmpdir); err != nil {
t.Errorf("error executing setupFunc: %v", err)
continue
}
}
// executes create func
err := writeCertificateAuthorithyFilesIfNotExist(tmpdir, "dummy", caCert, caKey)
if !test.expectedError && err != nil {
t.Errorf("error writeCertificateAuthorithyFilesIfNotExist failed when not expected to fail: %v", err)
continue
} else if test.expectedError && err == nil {
t.Error("error writeCertificateAuthorithyFilesIfNotExist didn't failed when expected")
continue
} else if test.expectedError {
continue
}
// asserts expected files are there
testutil.AssertFileExists(t, tmpdir, "dummy.key", "dummy.crt")
// check created cert
resultingCaCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(tmpdir, "dummy")
if err != nil {
t.Errorf("failure reading created cert: %v", err)
continue
}
if !resultingCaCert.Equal(test.expectedCa) {
t.Error("created ca cert does not match expected ca cert")
}
}
}
func TestWriteCertificateFilesIfNotExist(t *testing.T) {
caCert, caKey, _ := NewFrontProxyCACertAndKey()
setupCert, setupKey, _ := NewFrontProxyClientCertAndKey(caCert, caKey)
cert, key, _ := NewFrontProxyClientCertAndKey(caCert, caKey)
var tests = []struct {
setupFunc func(pkiDir string) error
expectedError bool
expectedCert *x509.Certificate
}{
{ // cert does not exist > cert written
expectedCert: cert,
},
{ // cert exists, is signed by the same ca > existing cert used
setupFunc: func(pkiDir string) error {
return writeCertificateFilesIfNotExist(pkiDir, "dummy", caCert, setupCert, setupKey)
},
expectedCert: setupCert,
},
{ // some file exists, but it is not a valid cert > err
setupFunc: func(pkiDir string) error {
testutil.SetupEmptyFiles(t, pkiDir, "dummy.crt")
return nil
},
expectedError: true,
},
{ // cert exists, is signed by another ca > err
setupFunc: func(pkiDir string) error {
anotherCaCert, anotherCaKey, _ := NewFrontProxyCACertAndKey()
anotherCert, anotherKey, _ := NewFrontProxyClientCertAndKey(anotherCaCert, anotherCaKey)
return writeCertificateFilesIfNotExist(pkiDir, "dummy", anotherCaCert, anotherCert, anotherKey)
},
expectedError: true,
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// executes setup func (if necessary)
if test.setupFunc != nil {
if err := test.setupFunc(tmpdir); err != nil {
t.Errorf("error executing setupFunc: %v", err)
continue
}
}
// executes create func
err := writeCertificateFilesIfNotExist(tmpdir, "dummy", caCert, cert, key)
if !test.expectedError && err != nil {
t.Errorf("error writeCertificateFilesIfNotExist failed when not expected to fail: %v", err)
continue
} else if test.expectedError && err == nil {
t.Error("error writeCertificateFilesIfNotExist didn't failed when expected")
continue
} else if test.expectedError {
continue
}
// asserts expected files are there
testutil.AssertFileExists(t, tmpdir, "dummy.key", "dummy.crt")
// check created cert
resultingCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(tmpdir, "dummy")
if err != nil {
t.Errorf("failure reading created cert: %v", err)
continue
}
if !resultingCert.Equal(test.expectedCert) {
t.Error("created cert does not match expected cert")
}
}
}
func TestWriteKeyFilesIfNotExist(t *testing.T) {
setupKey, _ := NewServiceAccountSigningKey()
key, _ := NewServiceAccountSigningKey()
var tests = []struct {
setupFunc func(pkiDir string) error
expectedError bool
expectedKey *rsa.PrivateKey
}{
{ // key does not exist > key written
expectedKey: key,
},
{ // key exists > existing key used
setupFunc: func(pkiDir string) error {
return writeKeyFilesIfNotExist(pkiDir, "dummy", setupKey)
},
expectedKey: setupKey,
},
{ // some file exists, but it is not a valid key > err
setupFunc: func(pkiDir string) error {
testutil.SetupEmptyFiles(t, pkiDir, "dummy.key")
return nil
},
expectedError: true,
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// executes setup func (if necessary)
if test.setupFunc != nil {
if err := test.setupFunc(tmpdir); err != nil {
t.Errorf("error executing setupFunc: %v", err)
continue
}
}
// executes create func
err := writeKeyFilesIfNotExist(tmpdir, "dummy", key)
if !test.expectedError && err != nil {
t.Errorf("error writeKeyFilesIfNotExist failed when not expected to fail: %v", err)
continue
} else if test.expectedError && err == nil {
t.Error("error writeKeyFilesIfNotExist didn't failed when expected")
continue
} else if test.expectedError {
continue
}
// asserts expected files are there
testutil.AssertFileExists(t, tmpdir, "dummy.key", "dummy.pub")
// check created key
resultingKey, err := pkiutil.TryLoadKeyFromDisk(tmpdir, "dummy")
if err != nil {
t.Errorf("failure reading created key: %v", err)
continue
}
//TODO: check if there is a better method to compare keys
if resultingKey.D.Cmp(test.expectedKey.D) != 0 {
t.Error("created key does not match expected key")
}
}
}
func TestGetAltNames(t *testing.T) {
hostname := "valid-hostname"
advertiseIP := "1.2.3.4"
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: advertiseIP},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: hostname,
}
altNames, err := getAltNames(cfg)
if err != nil {
t.Fatalf("failed calling getAltNames: %v", err)
}
expectedDNSNames := []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local"}
for _, DNSName := range expectedDNSNames {
found := false
for _, val := range altNames.DNSNames {
if val == DNSName {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain DNSName %s", DNSName)
}
}
expectedIPAddresses := []string{"10.96.0.1", advertiseIP}
for _, IPAddress := range expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain IPAddress %s", IPAddress)
}
}
}
func TestNewCACertAndKey(t *testing.T) {
caCert, _, err := NewCACertAndKey()
if err != nil {
t.Fatalf("failed call NewCACertAndKey: %v", err)
}
certstestutil.AssertCertificateIsCa(t, caCert)
}
func TestNewAPIServerCertAndKey(t *testing.T) {
hostname := "valid-hostname"
advertiseAddresses := []string{"1.2.3.4", "1:2:3::4"}
for _, addr := range advertiseAddresses {
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: addr},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
}
caCert, caKey, err := NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
apiServerCert, _, err := NewAPIServerCertAndKey(cfg, caCert, caKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, apiServerCert, caCert)
certstestutil.AssertCertificateHasServerAuthUsage(t, apiServerCert)
certstestutil.AssertCertificateHasDNSNames(t, apiServerCert, hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local")
certstestutil.AssertCertificateHasIPAddresses(t, apiServerCert, net.ParseIP("10.96.0.1"), net.ParseIP(addr))
}
}
func TestNewAPIServerKubeletClientCertAndKey(t *testing.T) {
caCert, caKey, err := NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
apiClientCert, _, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, apiClientCert, caCert)
certstestutil.AssertCertificateHasClientAuthUsage(t, apiClientCert)
certstestutil.AssertCertificateHasOrganizations(t, apiClientCert, kubeadmconstants.MastersGroup)
}
func TestNewServiceAccountSigningKey(t *testing.T) {
key, err := NewServiceAccountSigningKey()
if err != nil {
t.Fatalf("failed creation of key: %v", err)
}
if key.N.BitLen() < 2048 {
t.Error("Service account signing key has less than 2048 bits size")
}
}
func TestNewFrontProxyCACertAndKey(t *testing.T) {
frontProxyCACert, _, err := NewFrontProxyCACertAndKey()
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsCa(t, frontProxyCACert)
}
func TestNewFrontProxyClientCertAndKey(t *testing.T) {
frontProxyCACert, frontProxyCAKey, err := NewFrontProxyCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
frontProxyClientCert, _, err := NewFrontProxyClientCertAndKey(frontProxyCACert, frontProxyCAKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, frontProxyClientCert, frontProxyCACert)
certstestutil.AssertCertificateHasClientAuthUsage(t, frontProxyClientCert)
}
func TestUsingExternalCA(t *testing.T) {
tests := []struct {
setupFuncs []func(cfg *kubeadmapi.MasterConfiguration) error
expected bool
}{
{
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreatePKIAssets,
},
expected: false,
},
{
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreatePKIAssets,
deleteCAKey,
},
expected: true,
},
}
for _, test := range tests {
dir := testutil.SetupTempDir(t)
defer os.RemoveAll(dir)
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
CertificatesDir: dir,
}
for _, f := range test.setupFuncs {
if err := f(cfg); err != nil {
t.Errorf("error executing setup function: %v", err)
}
}
if val, _ := UsingExternalCA(cfg); val != test.expected {
t.Errorf("UsingExternalCA did not match expected: %v", test.expected)
}
}
}
func TestValidateMethods(t *testing.T) {
tests := []struct {
name string
setupFuncs []func(cfg *kubeadmapi.MasterConfiguration) error
validateFunc func(l certKeyLocation) error
loc certKeyLocation
expectedSuccess bool
}{
{
name: "validateCACert",
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreateCACertAndKeyfiles,
},
validateFunc: validateCACert,
loc: certKeyLocation{caBaseName: "ca", baseName: "", uxName: "CA"},
expectedSuccess: true,
},
{
name: "validateCACertAndKey (files present)",
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreateCACertAndKeyfiles,
},
validateFunc: validateCACertAndKey,
loc: certKeyLocation{caBaseName: "ca", baseName: "", uxName: "CA"},
expectedSuccess: true,
},
{
name: "validateCACertAndKey (key missing)",
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreatePKIAssets,
deleteCAKey,
},
validateFunc: validateCACertAndKey,
loc: certKeyLocation{caBaseName: "ca", baseName: "", uxName: "CA"},
expectedSuccess: false,
},
{
name: "validateSignedCert",
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreateCACertAndKeyfiles,
CreateAPIServerCertAndKeyFiles,
},
validateFunc: validateSignedCert,
loc: certKeyLocation{caBaseName: "ca", baseName: "apiserver", uxName: "apiserver"},
expectedSuccess: true,
},
{
name: "validatePrivatePublicKey",
setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
CreateServiceAccountKeyAndPublicKeyFiles,
},
validateFunc: validatePrivatePublicKey,
loc: certKeyLocation{baseName: "sa", uxName: "service account"},
expectedSuccess: true,
},
}
for _, test := range tests {
dir := testutil.SetupTempDir(t)
defer os.RemoveAll(dir)
test.loc.pkiDir = dir
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
CertificatesDir: dir,
}
fmt.Println("Testing", test.name)
for _, f := range test.setupFuncs {
if err := f(cfg); err != nil {
t.Errorf("error executing setup function: %v", err)
}
}
err := test.validateFunc(test.loc)
if test.expectedSuccess && err != nil {
t.Errorf("expected success, error executing validateFunc: %v, %v", test.name, err)
} else if !test.expectedSuccess && err == nil {
t.Errorf("expected failure, no error executing validateFunc: %v", test.name)
}
}
}
func deleteCAKey(cfg *kubeadmapi.MasterConfiguration) error {
if err := os.Remove(filepath.Join(cfg.CertificatesDir, "ca.key")); err != nil {
return fmt.Errorf("failed removing ca.key: %v", err)
}
return nil
}
func assertIsCa(t *testing.T, cert *x509.Certificate) {
if !cert.IsCA {
t.Error("cert is not a valida CA")
}
}
func TestCreateCertificateFilesMethods(t *testing.T) {
var tests = []struct {
setupFunc func(cfg *kubeadmapi.MasterConfiguration) error
createFunc func(cfg *kubeadmapi.MasterConfiguration) error
expectedFiles []string
}{
{
createFunc: CreatePKIAssets,
expectedFiles: []string{
kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName,
kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName,
kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName,
},
},
{
createFunc: CreateCACertAndKeyfiles,
expectedFiles: []string{kubeadmconstants.CACertName, kubeadmconstants.CAKeyName},
},
{
setupFunc: CreateCACertAndKeyfiles,
createFunc: CreateAPIServerCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName},
},
{
setupFunc: CreateCACertAndKeyfiles,
createFunc: CreateAPIServerKubeletClientCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName},
},
{
createFunc: CreateServiceAccountKeyAndPublicKeyFiles,
expectedFiles: []string{kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName},
},
{
createFunc: CreateFrontProxyCACertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName},
},
{
setupFunc: CreateFrontProxyCACertAndKeyFiles,
createFunc: CreateFrontProxyClientCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName},
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
CertificatesDir: tmpdir,
}
// executes setup func (if necessary)
if test.setupFunc != nil {
if err := test.setupFunc(cfg); err != nil {
t.Errorf("error executing setupFunc: %v", err)
continue
}
}
// executes create func
if err := test.createFunc(cfg); err != nil {
t.Errorf("error executing createFunc: %v", err)
continue
}
// asserts expected files are there
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
}
}

View File

@ -0,0 +1,46 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certs
/*
PHASE: CERTIFICATES
INPUTS:
From MasterConfiguration
.API.AdvertiseAddress is an optional parameter that can be passed for an extra addition to the SAN IPs
.APIServerCertSANs is needed for knowing which DNS names and IPs the API Server serving cert should be valid for
.Networking.DNSDomain is needed for knowing which DNS name the internal kubernetes service has
.Networking.ServiceSubnet is needed for knowing which IP the internal kubernetes service is going to point to
.CertificatesDir is required for knowing where all certificates should be stored
OUTPUTS:
Files to .CertificatesDir (default /etc/kubernetes/pki):
- ca.crt
- ca.key
- apiserver.crt
- apiserver.key
- apiserver-kubelet-client.crt
- apiserver-kubelet-client.key
- sa.pub
- sa.key
- front-proxy-ca.crt
- front-proxy-ca.key
- front-proxy-client.crt
- front-proxy-client.key
*/
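A minimal sketch of driving this phase (not part of this commit; the directory, address, and node name are placeholders, mirroring what the unit tests above use):
package main

import (
	"io/ioutil"
	"log"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
)

func main() {
	// Placeholder directory; kubeadm's default is /etc/kubernetes/pki.
	dir, err := ioutil.TempDir("", "kubeadm-pki")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &kubeadmapi.MasterConfiguration{
		API:             kubeadmapi.API{AdvertiseAddress: "10.0.0.10"}, // made-up address
		Networking:      kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
		NodeName:        "example-master",
		CertificatesDir: dir,
	}
	// Writes every file listed under OUTPUTS above, or reuses matching existing ones.
	if err := certsphase.CreatePKIAssets(cfg); err != nil {
		log.Fatalf("certificates phase failed: %v", err)
	}
}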

View File

@ -0,0 +1,35 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["pki_helpers_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil",
library = ":go_default_library",
deps = ["//vendor/k8s.io/client-go/util/cert:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["pki_helpers.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil",
deps = ["//vendor/k8s.io/client-go/util/cert:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,248 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pkiutil
import (
"crypto/rsa"
"crypto/x509"
"fmt"
"os"
"path/filepath"
"time"
certutil "k8s.io/client-go/util/cert"
)
// NewCertificateAuthority creates new certificate and private key for the certificate authority
func NewCertificateAuthority() (*x509.Certificate, *rsa.PrivateKey, error) {
key, err := certutil.NewPrivateKey()
if err != nil {
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
}
config := certutil.Config{
CommonName: "kubernetes",
}
cert, err := certutil.NewSelfSignedCACert(config, key)
if err != nil {
return nil, nil, fmt.Errorf("unable to create self-signed certificate [%v]", err)
}
return cert, key, nil
}
// NewCertAndKey creates new certificate and key by passing the certificate authority certificate and key
func NewCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey, config certutil.Config) (*x509.Certificate, *rsa.PrivateKey, error) {
key, err := certutil.NewPrivateKey()
if err != nil {
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
}
cert, err := certutil.NewSignedCert(config, key, caCert, caKey)
if err != nil {
return nil, nil, fmt.Errorf("unable to sign certificate [%v]", err)
}
return cert, key, nil
}
// HasServerAuth returns true if the given certificate is a ServerAuth
func HasServerAuth(cert *x509.Certificate) bool {
for i := range cert.ExtKeyUsage {
if cert.ExtKeyUsage[i] == x509.ExtKeyUsageServerAuth {
return true
}
}
return false
}
// WriteCertAndKey stores certificate and key at the specified location
func WriteCertAndKey(pkiPath string, name string, cert *x509.Certificate, key *rsa.PrivateKey) error {
if err := WriteKey(pkiPath, name, key); err != nil {
return err
}
return WriteCert(pkiPath, name, cert)
}
// WriteCert stores the given certificate at the given location
func WriteCert(pkiPath, name string, cert *x509.Certificate) error {
if cert == nil {
return fmt.Errorf("certificate cannot be nil when writing to file")
}
certificatePath := pathForCert(pkiPath, name)
if err := certutil.WriteCert(certificatePath, certutil.EncodeCertPEM(cert)); err != nil {
return fmt.Errorf("unable to write certificate to file %q: [%v]", certificatePath, err)
}
return nil
}
// WriteKey stores the given key at the given location
func WriteKey(pkiPath, name string, key *rsa.PrivateKey) error {
if key == nil {
return fmt.Errorf("private key cannot be nil when writing to file")
}
privateKeyPath := pathForKey(pkiPath, name)
if err := certutil.WriteKey(privateKeyPath, certutil.EncodePrivateKeyPEM(key)); err != nil {
return fmt.Errorf("unable to write private key to file %q: [%v]", privateKeyPath, err)
}
return nil
}
// WritePublicKey stores the given public key at the given location
func WritePublicKey(pkiPath, name string, key *rsa.PublicKey) error {
if key == nil {
return fmt.Errorf("public key cannot be nil when writing to file")
}
publicKeyBytes, err := certutil.EncodePublicKeyPEM(key)
if err != nil {
return err
}
publicKeyPath := pathForPublicKey(pkiPath, name)
if err := certutil.WriteKey(publicKeyPath, publicKeyBytes); err != nil {
return fmt.Errorf("unable to write public key to file %q: [%v]", publicKeyPath, err)
}
return nil
}
// CertOrKeyExist returns a boolean indicating whether the cert or the key exists
func CertOrKeyExist(pkiPath, name string) bool {
certificatePath, privateKeyPath := pathsForCertAndKey(pkiPath, name)
_, certErr := os.Stat(certificatePath)
_, keyErr := os.Stat(privateKeyPath)
if os.IsNotExist(certErr) && os.IsNotExist(keyErr) {
// Neither the cert nor the key exists
return false
}
// At least one of the cert and the key exists
return true
}
// TryLoadCertAndKeyFromDisk tries to load a cert and a key from the disk and validates that they are valid
func TryLoadCertAndKeyFromDisk(pkiPath, name string) (*x509.Certificate, *rsa.PrivateKey, error) {
cert, err := TryLoadCertFromDisk(pkiPath, name)
if err != nil {
return nil, nil, err
}
key, err := TryLoadKeyFromDisk(pkiPath, name)
if err != nil {
return nil, nil, err
}
return cert, key, nil
}
// TryLoadCertFromDisk tries to load the cert from the disk and validates that it is valid
func TryLoadCertFromDisk(pkiPath, name string) (*x509.Certificate, error) {
certificatePath := pathForCert(pkiPath, name)
certs, err := certutil.CertsFromFile(certificatePath)
if err != nil {
return nil, fmt.Errorf("couldn't load the certificate file %s: %v", certificatePath, err)
}
// We are only putting one certificate in the certificate pem file, so it's safe to just pick the first one
// TODO: Support multiple certs here in order to be able to rotate certs
cert := certs[0]
// Check so that the certificate is valid now
now := time.Now()
if now.Before(cert.NotBefore) {
return nil, fmt.Errorf("the certificate is not valid yet")
}
if now.After(cert.NotAfter) {
return nil, fmt.Errorf("the certificate has expired")
}
return cert, nil
}
// TryLoadKeyFromDisk tries to load the key from disk and checks that it is valid
func TryLoadKeyFromDisk(pkiPath, name string) (*rsa.PrivateKey, error) {
privateKeyPath := pathForKey(pkiPath, name)
// Parse the private key from a file
privKey, err := certutil.PrivateKeyFromFile(privateKeyPath)
if err != nil {
return nil, fmt.Errorf("couldn't load the private key file %s: %v", privateKeyPath, err)
}
// Allow RSA format only
var key *rsa.PrivateKey
switch k := privKey.(type) {
case *rsa.PrivateKey:
key = k
default:
return nil, fmt.Errorf("the private key file %s isn't in RSA format", privateKeyPath)
}
return key, nil
}
// TryLoadPrivatePublicKeyFromDisk tries to load the private and public keys from disk and checks that they are valid
func TryLoadPrivatePublicKeyFromDisk(pkiPath, name string) (*rsa.PrivateKey, *rsa.PublicKey, error) {
privateKeyPath := pathForKey(pkiPath, name)
// Parse the private key from a file
privKey, err := certutil.PrivateKeyFromFile(privateKeyPath)
if err != nil {
return nil, nil, fmt.Errorf("couldn't load the private key file %s: %v", privateKeyPath, err)
}
publicKeyPath := pathForPublicKey(pkiPath, name)
// Parse the public key from a file
pubKeys, err := certutil.PublicKeysFromFile(publicKeyPath)
if err != nil {
return nil, nil, fmt.Errorf("couldn't load the public key file %s: %v", publicKeyPath, err)
}
// Allow RSA format only
k, ok := privKey.(*rsa.PrivateKey)
if !ok {
return nil, nil, fmt.Errorf("the private key file %s isn't in RSA format", privateKeyPath)
}
p := pubKeys[0].(*rsa.PublicKey)
return k, p, nil
}
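// pathsForCertAndKey returns the paths where the certificate and the private key for the given name are stored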
func pathsForCertAndKey(pkiPath, name string) (string, string) {
return pathForCert(pkiPath, name), pathForKey(pkiPath, name)
}
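// pathForCert returns the path to the <name>.crt file under pkiPath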
func pathForCert(pkiPath, name string) string {
return filepath.Join(pkiPath, fmt.Sprintf("%s.crt", name))
}
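// pathForKey returns the path to the <name>.key file under pkiPath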
func pathForKey(pkiPath, name string) string {
return filepath.Join(pkiPath, fmt.Sprintf("%s.key", name))
}
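// pathForPublicKey returns the path to the <name>.pub file under pkiPath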
func pathForPublicKey(pkiPath, name string) string {
return filepath.Join(pkiPath, fmt.Sprintf("%s.pub", name))
}
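For orientation, a minimal sketch of how the helpers above compose, assuming NewCertificateAuthority from this package; the function name newServingCertSketch, the pkiDir argument, and the CommonName are illustrative only and not part of kubeadm:
// newServingCertSketch is a hypothetical example, not part of this package: it
// creates a CA, signs a serving certificate with it, and persists both pairs
// using the write helpers above.
func newServingCertSketch(pkiDir string) error {
caCert, caKey, err := NewCertificateAuthority()
if err != nil {
return err
}
cert, key, err := NewCertAndKey(caCert, caKey, certutil.Config{
CommonName: "example-server", // illustrative CN
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
})
if err != nil {
return err
}
if err := WriteCertAndKey(pkiDir, "ca", caCert, caKey); err != nil {
return err
}
return WriteCertAndKey(pkiDir, "example-server", cert, key)
}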

View File

@ -0,0 +1,434 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pkiutil
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"io/ioutil"
"os"
"testing"
certutil "k8s.io/client-go/util/cert"
)
func TestNewCertificateAuthority(t *testing.T) {
cert, key, err := NewCertificateAuthority()
if cert == nil {
t.Errorf(
"failed NewCertificateAuthority, cert == nil",
)
}
if key == nil {
t.Errorf(
"failed NewCertificateAuthority, key == nil",
)
}
if err != nil {
t.Errorf(
"failed NewCertificateAuthority with an error: %v",
err,
)
}
}
func TestNewCertAndKey(t *testing.T) {
var tests = []struct {
caKeySize int
expected bool
}{
{
// RSA key too small
caKeySize: 128,
expected: false,
},
{
// Should succeed
caKeySize: 2048,
expected: true,
},
}
for _, rt := range tests {
caKey, err := rsa.GenerateKey(rand.Reader, rt.caKeySize)
if err != nil {
t.Fatalf("Couldn't create rsa Private Key")
}
caCert := &x509.Certificate{}
config := certutil.Config{
CommonName: "test",
Organization: []string{"test"},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
_, _, actual := NewCertAndKey(caCert, caKey, config)
if (actual == nil) != rt.expected {
t.Errorf(
"failed NewCertAndKey:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
func TestHasServerAuth(t *testing.T) {
caCert, caKey, _ := NewCertificateAuthority()
var tests = []struct {
config certutil.Config
expected bool
}{
{
config: certutil.Config{
CommonName: "test",
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
},
expected: true,
},
{
config: certutil.Config{
CommonName: "test",
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
},
expected: false,
},
}
for _, rt := range tests {
cert, _, err := NewCertAndKey(caCert, caKey, rt.config)
if err != nil {
t.Fatalf("Couldn't create cert: %v", err)
}
actual := HasServerAuth(cert)
if actual != rt.expected {
t.Errorf(
"failed HasServerAuth:\n\texpected: %t\n\t actual: %t",
rt.expected,
actual,
)
}
}
}
func TestWriteCertAndKey(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatalf("Couldn't create rsa Private Key")
}
caCert := &x509.Certificate{}
actual := WriteCertAndKey(tmpdir, "foo", caCert, caKey)
if actual != nil {
t.Errorf(
"failed WriteCertAndKey with an error: %v",
actual,
)
}
}
func TestWriteCert(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caCert := &x509.Certificate{}
actual := WriteCert(tmpdir, "foo", caCert)
if actual != nil {
t.Errorf(
"failed WriteCertAndKey with an error: %v",
actual,
)
}
}
func TestWriteKey(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatalf("Couldn't create rsa Private Key")
}
actual := WriteKey(tmpdir, "foo", caKey)
if actual != nil {
t.Errorf(
"failed WriteCertAndKey with an error: %v",
actual,
)
}
}
func TestWritePublicKey(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatalf("Couldn't create rsa Private Key")
}
actual := WritePublicKey(tmpdir, "foo", &caKey.PublicKey)
if actual != nil {
t.Errorf(
"failed WriteCertAndKey with an error: %v",
actual,
)
}
}
func TestCertOrKeyExist(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatalf("Couldn't create rsa Private Key")
}
caCert := &x509.Certificate{}
actual := WriteCertAndKey(tmpdir, "foo", caCert, caKey)
if actual != nil {
t.Errorf(
"failed WriteCertAndKey with an error: %v",
actual,
)
}
var tests = []struct {
path string
name string
expected bool
}{
{
path: "",
name: "",
expected: false,
},
{
path: tmpdir,
name: "foo",
expected: true,
},
}
for _, rt := range tests {
actual := CertOrKeyExist(rt.path, rt.name)
if actual != rt.expected {
t.Errorf(
"failed CertOrKeyExist:\n\texpected: %t\n\t actual: %t",
rt.expected,
actual,
)
}
}
}
func TestTryLoadCertAndKeyFromDisk(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caCert, caKey, err := NewCertificateAuthority()
if err != nil {
t.Errorf(
"failed to create cert and key with an error: %v",
err,
)
}
err = WriteCertAndKey(tmpdir, "foo", caCert, caKey)
if err != nil {
t.Errorf(
"failed to write cert and key with an error: %v",
err,
)
}
var tests = []struct {
path string
name string
expected bool
}{
{
path: "",
name: "",
expected: false,
},
{
path: tmpdir,
name: "foo",
expected: true,
},
}
for _, rt := range tests {
_, _, actual := TryLoadCertAndKeyFromDisk(rt.path, rt.name)
if (actual == nil) != rt.expected {
t.Errorf(
"failed TryLoadCertAndKeyFromDisk:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
func TestTryLoadCertFromDisk(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
caCert, _, err := NewCertificateAuthority()
if err != nil {
t.Errorf(
"failed to create cert and key with an error: %v",
err,
)
}
err = WriteCert(tmpdir, "foo", caCert)
if err != nil {
t.Errorf(
"failed to write cert and key with an error: %v",
err,
)
}
var tests = []struct {
path string
name string
expected bool
}{
{
path: "",
name: "",
expected: false,
},
{
path: tmpdir,
name: "foo",
expected: true,
},
}
for _, rt := range tests {
_, actual := TryLoadCertFromDisk(rt.path, rt.name)
if (actual == nil) != rt.expected {
t.Errorf(
"failed TryLoadCertAndKeyFromDisk:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
func TestTryLoadKeyFromDisk(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
_, caKey, err := NewCertificateAuthority()
if err != nil {
t.Errorf(
"failed to create cert and key with an error: %v",
err,
)
}
err = WriteKey(tmpdir, "foo", caKey)
if err != nil {
t.Errorf(
"failed to write cert and key with an error: %v",
err,
)
}
var tests = []struct {
path string
name string
expected bool
}{
{
path: "",
name: "",
expected: false,
},
{
path: tmpdir,
name: "foo",
expected: true,
},
}
for _, rt := range tests {
_, actual := TryLoadKeyFromDisk(rt.path, rt.name)
if (actual == nil) != rt.expected {
t.Errorf(
"failed TryLoadCertAndKeyFromDisk:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
func TestPathsForCertAndKey(t *testing.T) {
crtPath, keyPath := pathsForCertAndKey("/foo", "bar")
if crtPath != "/foo/bar.crt" {
t.Errorf("unexpected certificate path: %s", crtPath)
}
if keyPath != "/foo/bar.key" {
t.Errorf("unexpected key path: %s", keyPath)
}
}
func TestPathForCert(t *testing.T) {
crtPath := pathForCert("/foo", "bar")
if crtPath != "/foo/bar.crt" {
t.Errorf("unexpected certificate path: %s", crtPath)
}
}
func TestPathForKey(t *testing.T) {
keyPath := pathForKey("/foo", "bar")
if keyPath != "/foo/bar.key" {
t.Errorf("unexpected certificate path: %s", keyPath)
}
}
func TestPathForPublicKey(t *testing.T) {
pubPath := pathForPublicKey("/foo", "bar")
if pubPath != "/foo/bar.pub" {
t.Errorf("unexpected certificate path: %s", pubPath)
}
}

View File

@ -0,0 +1,64 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"manifests_test.go",
"volumes_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"manifests.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/staticpod:go_default_library",
"//pkg/kubeapiserver/authorizer/modes:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,318 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/util/version"
)
// Static pod definitions in golang form are included below so that `kubeadm init` can get going.
const (
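// DefaultCloudConfigPath is the location of the cloud-config file that is mounted into the API server and controller manager when a cloud provider is configured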
DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"
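// defaultV18AdmissionControl is the default set of admission plugins enabled for a v1.8 control plane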
defaultV18AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
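// deprecatedV19AdmissionControl keeps PersistentVolumeLabel enabled for v1.9 clusters that still use the aws or gce in-tree cloud providers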
deprecatedV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
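// defaultV19AdmissionControl is the default set of admission plugins for v1.9 and newer; it drops PersistentVolumeLabel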
defaultV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
)
// CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane.
func CreateInitStaticPodManifestFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeScheduler)
}
// CreateAPIServerStaticPodManifestFile will write the API server static pod manifest file.
func CreateAPIServerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeAPIServer)
}
// CreateControllerManagerStaticPodManifestFile will write controller manager static pod manifest file.
func CreateControllerManagerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeControllerManager)
}
// CreateSchedulerStaticPodManifestFile will write scheduler static pod manifest file.
func CreateSchedulerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeScheduler)
}
// GetStaticPodSpecs returns all staticPodSpecs actualized to the context of the current MasterConfiguration
// NB. this method holds the information about how kubeadm creates static pod manifests.
func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) map[string]v1.Pod {
// Get the required hostpath mounts
mounts := getHostPathVolumesForTheControlPlane(cfg)
// Prepare static pod specs
staticPodSpecs := map[string]v1.Pod{
kubeadmconstants.KubeAPIServer: staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.KubeAPIServer,
Image: images.GetCoreImage(kubeadmconstants.KubeAPIServer, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
Command: getAPIServerCommand(cfg, k8sVersion),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeAPIServer, int(cfg.API.BindPort), "/healthz", v1.URISchemeHTTPS),
Resources: staticpodutil.ComponentResources("250m"),
Env: getProxyEnvVars(),
}, mounts.GetVolumes(kubeadmconstants.KubeAPIServer)),
kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.KubeControllerManager,
Image: images.GetCoreImage(kubeadmconstants.KubeControllerManager, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
Command: getControllerManagerCommand(cfg, k8sVersion),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeControllerManager, 10252, "/healthz", v1.URISchemeHTTP),
Resources: staticpodutil.ComponentResources("200m"),
Env: getProxyEnvVars(),
}, mounts.GetVolumes(kubeadmconstants.KubeControllerManager)),
kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.KubeScheduler,
Image: images.GetCoreImage(kubeadmconstants.KubeScheduler, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
Command: getSchedulerCommand(cfg),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeScheduler, 10251, "/healthz", v1.URISchemeHTTP),
Resources: staticpodutil.ComponentResources("100m"),
Env: getProxyEnvVars(),
}, mounts.GetVolumes(kubeadmconstants.KubeScheduler)),
}
return staticPodSpecs
}
// createStaticPodFiles creates all the requested static pod files.
func createStaticPodFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguration, componentNames ...string) error {
// TODO: Move the "pkg/util/version".Version object into the internal API instead of always parsing the string
k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
if err != nil {
return err
}
// gets the StaticPodSpecs, actualized for the current MasterConfiguration
specs := GetStaticPodSpecs(cfg, k8sVersion)
// creates required static pod specs
for _, componentName := range componentNames {
// retrieves the StaticPodSpec for the given component
spec, exists := specs[componentName]
if !exists {
return fmt.Errorf("couldn't retrive StaticPodSpec for %s", componentName)
}
// writes the StaticPodSpec to disk
if err := staticpodutil.WriteStaticPodToDisk(componentName, manifestDir, spec); err != nil {
return fmt.Errorf("failed to create static pod manifest file for %q: %v", componentName, err)
}
fmt.Printf("[controlplane] Wrote Static Pod manifest for component %s to %q\n", componentName, kubeadmconstants.GetStaticPodFilepath(componentName, manifestDir))
}
return nil
}
// getAPIServerCommand builds the right API server command from the given config object and version
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) []string {
defaultArguments := map[string]string{
"advertise-address": cfg.API.AdvertiseAddress,
"insecure-port": "0",
"admission-control": defaultV19AdmissionControl,
"service-cluster-ip-range": cfg.Networking.ServiceSubnet,
"service-account-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.ServiceAccountPublicKeyName),
"client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
"tls-cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerCertName),
"tls-private-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerKeyName),
"kubelet-client-certificate": filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerKubeletClientCertName),
"kubelet-client-key": filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerKubeletClientKeyName),
"enable-bootstrap-token-auth": "true",
"secure-port": fmt.Sprintf("%d", cfg.API.BindPort),
"allow-privileged": "true",
"kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
// add options to configure the front proxy. Without the generated client cert, this will never be usable
// so add it unconditionally with recommended values
"requestheader-username-headers": "X-Remote-User",
"requestheader-group-headers": "X-Remote-Group",
"requestheader-extra-headers-prefix": "X-Remote-Extra-",
"requestheader-client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertName),
"requestheader-allowed-names": "front-proxy-client",
"proxy-client-cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyClientCertName),
"proxy-client-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyClientKeyName),
}
command := []string{"kube-apiserver"}
if k8sVersion.Minor() == 8 {
defaultArguments["admission-control"] = defaultV18AdmissionControl
}
if cfg.CloudProvider == "aws" || cfg.CloudProvider == "gce" {
defaultArguments["admission-control"] = deprecatedV19AdmissionControl
}
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.APIServerExtraArgs)...)
command = append(command, getAuthzParameters(cfg.AuthorizationModes)...)
// Check if the user decided to use an external etcd cluster
if len(cfg.Etcd.Endpoints) > 0 {
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
} else {
command = append(command, "--etcd-servers=http://127.0.0.1:2379")
}
// Is etcd secured?
if cfg.Etcd.CAFile != "" {
command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
}
if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
command = append(command, etcdClientFileArg, etcdKeyFileArg)
}
if cfg.CloudProvider != "" {
command = append(command, "--cloud-provider="+cfg.CloudProvider)
// Only append the --cloud-config option if such a file exists
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
command = append(command, "--cloud-config="+DefaultCloudConfigPath)
}
}
if features.Enabled(cfg.FeatureGates, features.HighAvailability) {
command = append(command, "--endpoint-reconciler-type="+reconcilers.LeaseEndpointReconcilerType)
}
if features.Enabled(cfg.FeatureGates, features.DynamicKubeletConfig) {
command = append(command, "--feature-gates=DynamicKubeletConfig=true")
}
return command
}
// getControllerManagerCommand builds the right controller manager command from the given config object and version
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) []string {
defaultArguments := map[string]string{
"address": "127.0.0.1",
"leader-elect": "true",
"kubeconfig": filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName),
"root-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
"service-account-private-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.ServiceAccountPrivateKeyName),
"cluster-signing-cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
"cluster-signing-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CAKeyName),
"use-service-account-credentials": "true",
"controllers": "*,bootstrapsigner,tokencleaner",
}
// If using external CA, pass empty string to controller manager instead of ca.key/ca.crt path,
// so that the csrsigning controller fails to start
if res, _ := certphase.UsingExternalCA(cfg); res {
defaultArguments["cluster-signing-key-file"] = ""
defaultArguments["cluster-signing-cert-file"] = ""
}
command := []string{"kube-controller-manager"}
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.ControllerManagerExtraArgs)...)
if cfg.CloudProvider != "" {
command = append(command, "--cloud-provider="+cfg.CloudProvider)
// Only append the --cloud-config option if such a file exists
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
command = append(command, "--cloud-config="+DefaultCloudConfigPath)
}
}
// Let the controller-manager allocate Node CIDRs for the Pod network.
// Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
if cfg.Networking.PodSubnet != "" {
maskSize := "24"
if ip, _, err := net.ParseCIDR(cfg.Networking.PodSubnet); err == nil {
if ip.To4() == nil {
maskSize = "64"
}
}
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet,
"--node-cidr-mask-size="+maskSize)
}
return command
}
// getSchedulerCommand builds the right scheduler command from the given config object and version
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
defaultArguments := map[string]string{
"address": "127.0.0.1",
"leader-elect": "true",
"kubeconfig": filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.SchedulerKubeConfigFileName),
}
command := []string{"kube-scheduler"}
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.SchedulerExtraArgs)...)
return command
}
// getProxyEnvVars builds a list of environment variables to use in the control plane containers in order to use the right proxy
func getProxyEnvVars() []v1.EnvVar {
envs := []v1.EnvVar{}
for _, env := range os.Environ() {
pos := strings.Index(env, "=")
if pos == -1 {
// malformed environment variable, skip it.
continue
}
name := env[:pos]
value := env[pos+1:]
if strings.HasSuffix(strings.ToLower(name), "_proxy") && value != "" {
envVar := v1.EnvVar{Name: name, Value: value}
envs = append(envs, envVar)
}
}
return envs
}
// getAuthzParameters gets the authorization-related parameters to the api server
// At this point, we can assume the list of authorization modes is valid (it has already been validated in the API machinery code)
// If the list is empty, it's defaulted (mostly for unit testing)
func getAuthzParameters(modes []string) []string {
command := []string{}
strset := sets.NewString(modes...)
if len(modes) == 0 {
return []string{fmt.Sprintf("--authorization-mode=%s", kubeadmapiext.DefaultAuthorizationModes)}
}
if strset.Has(authzmodes.ModeABAC) {
command = append(command, "--authorization-policy-file="+kubeadmconstants.AuthorizationPolicyPath)
}
if strset.Has(authzmodes.ModeWebhook) {
command = append(command, "--authorization-webhook-config-file="+kubeadmconstants.AuthorizationWebhookConfigPath)
}
command = append(command, "--authorization-mode="+strings.Join(modes, ","))
return command
}

View File

@ -0,0 +1,790 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/util/version"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
const (
testCertsDir = "/var/lib/certs"
etcdDataDir = "/var/lib/etcd"
)
func TestGetStaticPodSpecs(t *testing.T) {
// Creates a Master Configuration
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.8.0",
}
// Executes GetStaticPodSpecs
// TODO: Move the "pkg/util/version".Version object into the internal API instead of always parsing the string
k8sVersion, _ := version.ParseSemantic(cfg.KubernetesVersion)
specs := GetStaticPodSpecs(cfg, k8sVersion)
var assertions = []struct {
staticPodName string
}{
{
staticPodName: kubeadmconstants.KubeAPIServer,
},
{
staticPodName: kubeadmconstants.KubeControllerManager,
},
{
staticPodName: kubeadmconstants.KubeScheduler,
},
}
for _, assertion := range assertions {
// assert the spec for the staticPodName exists
if spec, ok := specs[assertion.staticPodName]; ok {
// Assert each specs refers to the right pod
if spec.Spec.Containers[0].Name != assertion.staticPodName {
t.Errorf("getKubeConfigSpecs spec for %s contains pod %s, expectes %s", assertion.staticPodName, spec.Spec.Containers[0].Name, assertion.staticPodName)
}
} else {
t.Errorf("getStaticPodSpecs didn't create spec for %s ", assertion.staticPodName)
}
}
}
func TestCreateStaticPodFilesAndWrappers(t *testing.T) {
var tests = []struct {
createStaticPodFunction func(outDir string, cfg *kubeadmapi.MasterConfiguration) error
expectedFiles []string
}{
{ // CreateInitStaticPodManifestFiles
createStaticPodFunction: CreateInitStaticPodManifestFiles,
expectedFiles: []string{kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeScheduler},
},
{ // CreateAPIServerStaticPodManifestFile
createStaticPodFunction: CreateAPIServerStaticPodManifestFile,
expectedFiles: []string{kubeadmconstants.KubeAPIServer},
},
{ // CreateControllerManagerStaticPodManifestFile
createStaticPodFunction: CreateControllerManagerStaticPodManifestFile,
expectedFiles: []string{kubeadmconstants.KubeControllerManager},
},
{ // CreateSchedulerStaticPodManifestFile
createStaticPodFunction: CreateSchedulerStaticPodManifestFile,
expectedFiles: []string{kubeadmconstants.KubeScheduler},
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Creates a Master Configuration
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.8.0",
}
// Execute createStaticPodFunction
manifestPath := filepath.Join(tmpdir, kubeadmconstants.ManifestsSubDirName)
err := test.createStaticPodFunction(manifestPath, cfg)
if err != nil {
t.Errorf("Error executing createStaticPodFunction: %v", err)
continue
}
// Assert expected files are there
testutil.AssertFilesCount(t, manifestPath, len(test.expectedFiles))
for _, fileName := range test.expectedFiles {
testutil.AssertFileExists(t, manifestPath, fileName+".yaml")
}
}
}
func TestGetAPIServerCommand(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected []string
}{
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
"--secure-port=123",
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--secure-port=123",
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--enable-bootstrap-token-auth=true",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "4.3.2.1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.1",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
"--secure-port=123",
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=4.3.2.1",
"--etcd-servers=http://127.0.0.1:2379",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "4.3.2.1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
"--secure-port=123",
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=4.3.2.1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "4.3.2.1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.3",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=4.3.2.1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--enable-bootstrap-token-auth=true",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
FeatureGates: map[string]bool{features.HighAvailability: true},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--enable-bootstrap-token-auth=true",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379",
fmt.Sprintf("--endpoint-reconciler-type=%s", reconcilers.LeaseEndpointReconcilerType),
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
CloudProvider: "gce",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
"--secure-port=123",
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
"--cloud-provider=gce",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
CloudProvider: "aws",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
"--enable-bootstrap-token-auth=true",
"--secure-port=123",
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
"--cloud-provider=aws",
},
},
}
for _, rt := range tests {
actual := getAPIServerCommand(rt.cfg, version.MustParseSemantic(rt.cfg.KubernetesVersion))
sort.Strings(actual)
sort.Strings(rt.expected)
if !reflect.DeepEqual(actual, rt.expected) {
t.Errorf("failed getAPIServerCommand:\nexpected:\n%v\nsaw:\n%v", rt.expected, actual)
}
}
}
func TestGetControllerManagerCommand(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected []string
}{
{
cfg: &kubeadmapi.MasterConfiguration{
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.7.0",
},
expected: []string{
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf",
"--root-ca-file=" + testCertsDir + "/ca.crt",
"--service-account-private-key-file=" + testCertsDir + "/sa.key",
"--cluster-signing-cert-file=" + testCertsDir + "/ca.crt",
"--cluster-signing-key-file=" + testCertsDir + "/ca.key",
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
CloudProvider: "foo",
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.7.0",
},
expected: []string{
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf",
"--root-ca-file=" + testCertsDir + "/ca.crt",
"--service-account-private-key-file=" + testCertsDir + "/sa.key",
"--cluster-signing-cert-file=" + testCertsDir + "/ca.crt",
"--cluster-signing-key-file=" + testCertsDir + "/ca.key",
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
"--cloud-provider=foo",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.7.0",
},
expected: []string{
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf",
"--root-ca-file=" + testCertsDir + "/ca.crt",
"--service-account-private-key-file=" + testCertsDir + "/sa.key",
"--cluster-signing-cert-file=" + testCertsDir + "/ca.crt",
"--cluster-signing-key-file=" + testCertsDir + "/ca.key",
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
"--allocate-node-cidrs=true",
"--cluster-cidr=10.0.1.15/16",
"--node-cidr-mask-size=24",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadmapi.Networking{PodSubnet: "2001:101:115::/48"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.7.0",
},
expected: []string{
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf",
"--root-ca-file=" + testCertsDir + "/ca.crt",
"--service-account-private-key-file=" + testCertsDir + "/sa.key",
"--cluster-signing-cert-file=" + testCertsDir + "/ca.crt",
"--cluster-signing-key-file=" + testCertsDir + "/ca.key",
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
"--allocate-node-cidrs=true",
"--cluster-cidr=2001:101:115::/48",
"--node-cidr-mask-size=64",
},
},
}
for _, rt := range tests {
actual := getControllerManagerCommand(rt.cfg, version.MustParseSemantic(rt.cfg.KubernetesVersion))
sort.Strings(actual)
sort.Strings(rt.expected)
if !reflect.DeepEqual(actual, rt.expected) {
t.Errorf("failed getControllerManagerCommand:\nexpected:\n%v\nsaw:\n%v", rt.expected, actual)
}
}
}
func TestGetControllerManagerCommandExternalCA(t *testing.T) {
tests := []struct {
cfg *kubeadmapi.MasterConfiguration
caKeyPresent bool
expectedArgFunc func(dir string) []string
}{
{
cfg: &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.7.0",
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
},
caKeyPresent: false,
expectedArgFunc: func(tmpdir string) []string {
return []string{
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf",
"--root-ca-file=" + tmpdir + "/ca.crt",
"--service-account-private-key-file=" + tmpdir + "/sa.key",
"--cluster-signing-cert-file=",
"--cluster-signing-key-file=",
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
}
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.7.0",
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
},
caKeyPresent: true,
expectedArgFunc: func(tmpdir string) []string {
return []string{
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf",
"--root-ca-file=" + tmpdir + "/ca.crt",
"--service-account-private-key-file=" + tmpdir + "/sa.key",
"--cluster-signing-cert-file=" + tmpdir + "/ca.crt",
"--cluster-signing-key-file=" + tmpdir + "/ca.key",
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
}
},
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
test.cfg.CertificatesDir = tmpdir
if err := certs.CreatePKIAssets(test.cfg); err != nil {
t.Errorf("failed creating pki assets: %v", err)
}
// delete ca.key if test.caKeyPresent is false
if !test.caKeyPresent {
if err := os.Remove(filepath.Join(test.cfg.CertificatesDir, "ca.key")); err != nil {
t.Errorf("failed removing ca.key: %v", err)
}
}
actual := getControllerManagerCommand(test.cfg, version.MustParseSemantic(test.cfg.KubernetesVersion))
expected := test.expectedArgFunc(tmpdir)
sort.Strings(actual)
sort.Strings(expected)
if !reflect.DeepEqual(actual, expected) {
t.Errorf("failed getControllerManagerCommand:\nexpected:\n%v\nsaw:\n%v", expected, actual)
}
}
}
func TestGetSchedulerCommand(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected []string
}{
{
cfg: &kubeadmapi.MasterConfiguration{},
expected: []string{
"kube-scheduler",
"--address=127.0.0.1",
"--leader-elect=true",
"--kubeconfig=" + kubeadmconstants.KubernetesDir + "/scheduler.conf",
},
},
}
for _, rt := range tests {
actual := getSchedulerCommand(rt.cfg)
sort.Strings(actual)
sort.Strings(rt.expected)
if !reflect.DeepEqual(actual, rt.expected) {
t.Errorf("failed getSchedulerCommand:\nexpected:\n%v\nsaw:\n%v", rt.expected, actual)
}
}
}
func TestGetAuthzParameters(t *testing.T) {
var tests = []struct {
authMode []string
expected []string
}{
{
authMode: []string{},
expected: []string{
"--authorization-mode=Node,RBAC",
},
},
{
authMode: []string{"RBAC"},
expected: []string{
"--authorization-mode=RBAC",
},
},
{
authMode: []string{"AlwaysAllow"},
expected: []string{
"--authorization-mode=AlwaysAllow",
},
},
{
authMode: []string{"AlwaysDeny"},
expected: []string{
"--authorization-mode=AlwaysDeny",
},
},
{
authMode: []string{"ABAC"},
expected: []string{
"--authorization-mode=ABAC",
"--authorization-policy-file=/etc/kubernetes/abac_policy.json",
},
},
{
authMode: []string{"ABAC", "Webhook"},
expected: []string{
"--authorization-mode=ABAC,Webhook",
"--authorization-policy-file=/etc/kubernetes/abac_policy.json",
"--authorization-webhook-config-file=/etc/kubernetes/webhook_authz.conf",
},
},
{
authMode: []string{"ABAC", "RBAC", "Webhook"},
expected: []string{
"--authorization-mode=ABAC,RBAC,Webhook",
"--authorization-policy-file=/etc/kubernetes/abac_policy.json",
"--authorization-webhook-config-file=/etc/kubernetes/webhook_authz.conf",
},
},
{
authMode: []string{"Node", "RBAC", "Webhook", "ABAC"},
expected: []string{
"--authorization-mode=Node,RBAC,Webhook,ABAC",
"--authorization-policy-file=/etc/kubernetes/abac_policy.json",
"--authorization-webhook-config-file=/etc/kubernetes/webhook_authz.conf",
},
},
}
for _, rt := range tests {
actual := getAuthzParameters(rt.authMode)
sort.Strings(actual)
sort.Strings(rt.expected)
if !reflect.DeepEqual(actual, rt.expected) {
t.Errorf("failed getAuthzParameters:\nexpected:\n%v\nsaw:\n%v", rt.expected, actual)
}
}
}

View File

@ -0,0 +1,226 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"os"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
)
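// Names and host paths for the hostPath volumes shared by the control plane static pods; all are mounted read-only except the flexvolume directory.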
const (
caCertsVolumeName = "ca-certs"
caCertsVolumePath = "/etc/ssl/certs"
caCertsPkiVolumeName = "ca-certs-etc-pki"
flexvolumeDirVolumeName = "flexvolume-dir"
cloudConfigVolumeName = "cloud-config"
flexvolumeDirVolumePath = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec"
)
// caCertsPkiVolumePath specifies the path that can be conditionally mounted into the apiserver and controller-manager containers
// as /etc/ssl/certs might be a symlink to it. It's a variable since it may be changed in unit testing. This var MUST NOT be changed
// in normal codepaths during runtime.
var caCertsPkiVolumePath = "/etc/pki"
// getHostPathVolumesForTheControlPlane gets the required hostPath volumes and mounts for the control plane
func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) controlPlaneHostPathMounts {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
hostPathFileOrCreate := v1.HostPathFileOrCreate
mounts := newControlPlaneHostPathMounts()
// HostPath volumes for the API Server
// Read-only mount for the certificates directory
// TODO: Always mount the K8s Certificates directory to a static path inside of the container
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeCertificatesVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true, &hostPathDirectoryOrCreate)
// Read-only mount for the ca certs (/etc/ssl/certs) directory
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, caCertsVolumeName, caCertsVolumePath, caCertsVolumePath, true, &hostPathDirectoryOrCreate)
// If external etcd is specified, mount the directories needed for accessing the CA/serving certs and the private key
if len(cfg.Etcd.Endpoints) != 0 {
etcdVols, etcdVolMounts := getEtcdCertVolumes(cfg.Etcd, cfg.CertificatesDir)
mounts.AddHostPathMounts(kubeadmconstants.KubeAPIServer, etcdVols, etcdVolMounts)
}
// HostPath volumes for the controller manager
// Read-only mount for the certificates directory
// TODO: Always mount the K8s Certificates directory to a static path inside of the container
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeCertificatesVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true, &hostPathDirectoryOrCreate)
// Read-only mount for the ca certs (/etc/ssl/certs) directory
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, caCertsVolumeName, caCertsVolumePath, caCertsVolumePath, true, &hostPathDirectoryOrCreate)
// Read-only mount for the controller manager kubeconfig file
controllerManagerKubeConfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName)
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeConfigVolumeName, controllerManagerKubeConfigFile, controllerManagerKubeConfigFile, true, &hostPathFileOrCreate)
// Read-only mount of the cloud config file if present
if cfg.CloudProvider != "" {
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, cloudConfigVolumeName, DefaultCloudConfigPath, DefaultCloudConfigPath, true, &hostPathFileOrCreate)
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, cloudConfigVolumeName, DefaultCloudConfigPath, DefaultCloudConfigPath, true, &hostPathFileOrCreate)
}
}
// Mount for the flexvolume directory (/usr/libexec/kubernetes/kubelet-plugins/volume/exec)
// The flexvolume dir must NOT be read-only as it is used by third-party plugins to integrate with their storage backends via unix domain sockets.
if stat, err := os.Stat(flexvolumeDirVolumePath); err == nil && stat.IsDir() {
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, flexvolumeDirVolumeName, flexvolumeDirVolumePath, flexvolumeDirVolumePath, false, &hostPathDirectoryOrCreate)
}
// HostPath volumes for the scheduler
// Read-only mount for the scheduler kubeconfig file
schedulerKubeConfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.SchedulerKubeConfigFileName)
mounts.NewHostPathMount(kubeadmconstants.KubeScheduler, kubeadmconstants.KubeConfigVolumeName, schedulerKubeConfigFile, schedulerKubeConfigFile, true, &hostPathFileOrCreate)
// On some systems where we host-mount /etc/ssl/certs, it is also required to mount /etc/pki. This is needed
// due to symlinks pointing from files in /etc/ssl/certs into /etc/pki/
if isPkiVolumeMountNeeded() {
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, caCertsPkiVolumeName, caCertsPkiVolumePath, caCertsPkiVolumePath, true, &hostPathDirectoryOrCreate)
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, caCertsPkiVolumeName, caCertsPkiVolumePath, caCertsPkiVolumePath, true, &hostPathDirectoryOrCreate)
}
// Merge user defined mounts and ensure unique volume and volume mount
// names
mounts.AddExtraHostPathMounts(kubeadmconstants.KubeAPIServer, cfg.APIServerExtraVolumes, true, &hostPathDirectoryOrCreate)
mounts.AddExtraHostPathMounts(kubeadmconstants.KubeControllerManager, cfg.ControllerManagerExtraVolumes, true, &hostPathDirectoryOrCreate)
mounts.AddExtraHostPathMounts(kubeadmconstants.KubeScheduler, cfg.SchedulerExtraVolumes, true, &hostPathDirectoryOrCreate)
return mounts
}
// controlPlaneHostPathMounts is a helper struct for handling all the control plane's hostPath mounts in an easy way
type controlPlaneHostPathMounts struct {
// volumes is a nested map that enforces unique volumes. The outer map's
// keys specify the target component to add the volume to. The values
// (inner map) of the outer map are maps with string keys and v1.Volume
// values. The inner map's key specifies the volume name.
volumes map[string]map[string]v1.Volume
// volumeMounts is a nested map that enforces unique volume mounts. The
// outer map's keys specify the target component to add the volume mount
// to. The values (inner map) of the outer map are maps with string keys
// and v1.VolumeMount values. The inner map's key specifies the volume
// mount name.
volumeMounts map[string]map[string]v1.VolumeMount
}
func newControlPlaneHostPathMounts() controlPlaneHostPathMounts {
return controlPlaneHostPathMounts{
volumes: map[string]map[string]v1.Volume{},
volumeMounts: map[string]map[string]v1.VolumeMount{},
}
}
func (c *controlPlaneHostPathMounts) NewHostPathMount(component, mountName, hostPath, containerPath string, readOnly bool, hostPathType *v1.HostPathType) {
vol := staticpodutil.NewVolume(mountName, hostPath, hostPathType)
c.addComponentVolume(component, vol)
volMount := staticpodutil.NewVolumeMount(mountName, containerPath, readOnly)
c.addComponentVolumeMount(component, volMount)
}
func (c *controlPlaneHostPathMounts) AddHostPathMounts(component string, vols []v1.Volume, volMounts []v1.VolumeMount) {
for _, v := range vols {
c.addComponentVolume(component, v)
}
for _, v := range volMounts {
c.addComponentVolumeMount(component, v)
}
}
// AddExtraHostPathMounts adds host path mounts and overwrites the default
// paths in the case that a user specifies the same volume/volume mount name.
func (c *controlPlaneHostPathMounts) AddExtraHostPathMounts(component string, extraVols []kubeadmapi.HostPathMount, readOnly bool, hostPathType *v1.HostPathType) {
for _, extraVol := range extraVols {
fmt.Printf("[controlplane] Adding extra host path mount %q to %q\n", extraVol.Name, component)
c.NewHostPathMount(component, extraVol.Name, extraVol.HostPath, extraVol.MountPath, readOnly, hostPathType)
}
}
func (c *controlPlaneHostPathMounts) GetVolumes(component string) map[string]v1.Volume {
return c.volumes[component]
}
func (c *controlPlaneHostPathMounts) GetVolumeMounts(component string) map[string]v1.VolumeMount {
return c.volumeMounts[component]
}
func (c *controlPlaneHostPathMounts) addComponentVolume(component string, vol v1.Volume) {
if _, ok := c.volumes[component]; !ok {
c.volumes[component] = map[string]v1.Volume{}
}
c.volumes[component][vol.Name] = vol
}
func (c *controlPlaneHostPathMounts) addComponentVolumeMount(component string, volMount v1.VolumeMount) {
if _, ok := c.volumeMounts[component]; !ok {
c.volumeMounts[component] = map[string]v1.VolumeMount{}
}
c.volumeMounts[component][volMount.Name] = volMount
}
// getEtcdCertVolumes returns the volumes/volume mounts needed for talking to an external etcd cluster
func getEtcdCertVolumes(etcdCfg kubeadmapi.Etcd, k8sCertificatesDir string) ([]v1.Volume, []v1.VolumeMount) {
certPaths := []string{etcdCfg.CAFile, etcdCfg.CertFile, etcdCfg.KeyFile}
certDirs := sets.NewString()
for _, certPath := range certPaths {
certDir := filepath.Dir(certPath)
// Ignore ".", which is the result of passing an empty path.
// Also ignore cert directories that may already be mounted: /etc/ssl/certs, /etc/pki or the Kubernetes CertificatesDir
// If the etcd certs are in there, it's okay; we don't have to do anything
if certDir == "." || strings.HasPrefix(certDir, caCertsVolumePath) || strings.HasPrefix(certDir, caCertsPkiVolumePath) || strings.HasPrefix(certDir, k8sCertificatesDir) {
continue
}
// Filter out any existing hostPath mounts in the list that contain a subset of the path
alreadyExists := false
for _, existingCertDir := range certDirs.List() {
// If the current directory is a parent of an existing one, remove the already existing one
if strings.HasPrefix(existingCertDir, certDir) {
certDirs.Delete(existingCertDir)
} else if strings.HasPrefix(certDir, existingCertDir) {
// If an existing directory is a parent of the current one, don't add the current one
alreadyExists = true
}
}
if alreadyExists {
continue
}
certDirs.Insert(certDir)
}
volumes := []v1.Volume{}
volumeMounts := []v1.VolumeMount{}
pathType := v1.HostPathDirectoryOrCreate
for i, certDir := range certDirs.List() {
name := fmt.Sprintf("etcd-certs-%d", i)
volumes = append(volumes, staticpodutil.NewVolume(name, certDir, &pathType))
volumeMounts = append(volumeMounts, staticpodutil.NewVolumeMount(name, certDir, true))
}
return volumes, volumeMounts
}
// isPkiVolumeMountNeeded specifies whether /etc/pki should be host-mounted into the containers
// On some systems where we host-mount /etc/ssl/certs, it is also required to mount /etc/pki. This is needed
// due to symlinks pointing from files in /etc/ssl/certs into /etc/pki/
func isPkiVolumeMountNeeded() bool {
if _, err := os.Stat(caCertsPkiVolumePath); err == nil {
return true
}
return false
}
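// The following is an illustrative sketch, not part of the original source: it shows how a
// caller (for example a static pod manifest builder) might consume controlPlaneHostPathMounts
// for a single component. The component constant exists in kubeadmconstants; the function
// itself is hypothetical.
func exampleAPIServerVolumes(cfg *kubeadmapi.MasterConfiguration) ([]v1.Volume, []v1.VolumeMount) {
	mounts := getHostPathVolumesForTheControlPlane(cfg)
	// Flatten the per-component maps into slices, as a pod spec would expect.
	vols := []v1.Volume{}
	for _, vol := range mounts.GetVolumes(kubeadmconstants.KubeAPIServer) {
		vols = append(vols, vol)
	}
	volMounts := []v1.VolumeMount{}
	for _, volMount := range mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer) {
		volMounts = append(volMounts, volMount)
	}
	return vols, volMounts
}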

View File

@ -0,0 +1,609 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
"k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
func TestGetEtcdCertVolumes(t *testing.T) {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
k8sCertifcatesDir := "/etc/kubernetes/pki"
var tests = []struct {
ca, cert, key string
vol []v1.Volume
volMount []v1.VolumeMount
}{
{
// Should ignore files in /etc/ssl/certs
ca: "/etc/ssl/certs/my-etcd-ca.crt",
cert: "/etc/ssl/certs/my-etcd.crt",
key: "/etc/ssl/certs/my-etcd.key",
vol: []v1.Volume{},
volMount: []v1.VolumeMount{},
},
{
// Should ignore files in subdirs of /etc/ssl/certs
ca: "/etc/ssl/certs/etcd/my-etcd-ca.crt",
cert: "/etc/ssl/certs/etcd/my-etcd.crt",
key: "/etc/ssl/certs/etcd/my-etcd.key",
vol: []v1.Volume{},
volMount: []v1.VolumeMount{},
},
{
// Should ignore files in /etc/pki
ca: "/etc/pki/my-etcd-ca.crt",
cert: "/etc/pki/my-etcd.crt",
key: "/etc/pki/my-etcd.key",
vol: []v1.Volume{},
volMount: []v1.VolumeMount{},
},
{
// Should ignore files in Kubernetes PKI directory (and subdirs)
ca: k8sCertifcatesDir + "/ca/my-etcd-ca.crt",
cert: k8sCertifcatesDir + "/my-etcd.crt",
key: k8sCertifcatesDir + "/my-etcd.key",
vol: []v1.Volume{},
volMount: []v1.VolumeMount{},
},
{
// All in the same dir
ca: "/var/lib/certs/etcd/my-etcd-ca.crt",
cert: "/var/lib/certs/etcd/my-etcd.crt",
key: "/var/lib/certs/etcd/my-etcd.key",
vol: []v1.Volume{
{
Name: "etcd-certs-0",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
},
volMount: []v1.VolumeMount{
{
Name: "etcd-certs-0",
MountPath: "/var/lib/certs/etcd",
ReadOnly: true,
},
},
},
{
// One file + two files in separate dirs
ca: "/etc/certs/etcd/my-etcd-ca.crt",
cert: "/var/lib/certs/etcd/my-etcd.crt",
key: "/var/lib/certs/etcd/my-etcd.key",
vol: []v1.Volume{
{
Name: "etcd-certs-0",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "etcd-certs-1",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
},
volMount: []v1.VolumeMount{
{
Name: "etcd-certs-0",
MountPath: "/etc/certs/etcd",
ReadOnly: true,
},
{
Name: "etcd-certs-1",
MountPath: "/var/lib/certs/etcd",
ReadOnly: true,
},
},
},
{
// All three files in different directories
ca: "/etc/certs/etcd/my-etcd-ca.crt",
cert: "/var/lib/certs/etcd/my-etcd.crt",
key: "/var/lib/certs/private/my-etcd.key",
vol: []v1.Volume{
{
Name: "etcd-certs-0",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "etcd-certs-1",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "etcd-certs-2",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/certs/private",
Type: &hostPathDirectoryOrCreate,
},
},
},
},
volMount: []v1.VolumeMount{
{
Name: "etcd-certs-0",
MountPath: "/etc/certs/etcd",
ReadOnly: true,
},
{
Name: "etcd-certs-1",
MountPath: "/var/lib/certs/etcd",
ReadOnly: true,
},
{
Name: "etcd-certs-2",
MountPath: "/var/lib/certs/private",
ReadOnly: true,
},
},
},
{
// The most top-level dir should be used
ca: "/etc/certs/etcd/my-etcd-ca.crt",
cert: "/etc/certs/etcd/serving/my-etcd.crt",
key: "/etc/certs/etcd/serving/my-etcd.key",
vol: []v1.Volume{
{
Name: "etcd-certs-0",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
},
volMount: []v1.VolumeMount{
{
Name: "etcd-certs-0",
MountPath: "/etc/certs/etcd",
ReadOnly: true,
},
},
},
{
// The most top-level dir should be used, regardless of order
ca: "/etc/certs/etcd/ca/my-etcd-ca.crt",
cert: "/etc/certs/etcd/my-etcd.crt",
key: "/etc/certs/etcd/my-etcd.key",
vol: []v1.Volume{
{
Name: "etcd-certs-0",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
},
},
volMount: []v1.VolumeMount{
{
Name: "etcd-certs-0",
MountPath: "/etc/certs/etcd",
ReadOnly: true,
},
},
},
}
for _, rt := range tests {
actualVol, actualVolMount := getEtcdCertVolumes(kubeadmapi.Etcd{
CAFile: rt.ca,
CertFile: rt.cert,
KeyFile: rt.key,
}, k8sCertifcatesDir)
if !reflect.DeepEqual(actualVol, rt.vol) {
t.Errorf(
"failed getEtcdCertVolumes:\n\texpected: %v\n\t actual: %v",
rt.vol,
actualVol,
)
}
if !reflect.DeepEqual(actualVolMount, rt.volMount) {
t.Errorf(
"failed getEtcdCertVolumes:\n\texpected: %v\n\t actual: %v",
rt.volMount,
actualVolMount,
)
}
}
}
func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
hostPathFileOrCreate := v1.HostPathFileOrCreate
volMap := make(map[string]map[string]v1.Volume)
volMap[kubeadmconstants.KubeAPIServer] = map[string]v1.Volume{}
volMap[kubeadmconstants.KubeAPIServer]["k8s-certs"] = v1.Volume{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: testCertsDir,
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap[kubeadmconstants.KubeAPIServer]["ca-certs"] = v1.Volume{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap[kubeadmconstants.KubeControllerManager] = map[string]v1.Volume{}
volMap[kubeadmconstants.KubeControllerManager]["k8s-certs"] = v1.Volume{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: testCertsDir,
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap[kubeadmconstants.KubeControllerManager]["ca-certs"] = v1.Volume{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap[kubeadmconstants.KubeControllerManager]["kubeconfig"] = v1.Volume{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/controller-manager.conf",
Type: &hostPathFileOrCreate,
},
},
}
volMap[kubeadmconstants.KubeScheduler] = map[string]v1.Volume{}
volMap[kubeadmconstants.KubeScheduler]["kubeconfig"] = v1.Volume{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/scheduler.conf",
Type: &hostPathFileOrCreate,
},
},
}
volMountMap := make(map[string]map[string]v1.VolumeMount)
volMountMap[kubeadmconstants.KubeAPIServer] = map[string]v1.VolumeMount{}
volMountMap[kubeadmconstants.KubeAPIServer]["k8s-certs"] = v1.VolumeMount{
Name: "k8s-certs",
MountPath: testCertsDir,
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeAPIServer]["ca-certs"] = v1.VolumeMount{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeControllerManager] = map[string]v1.VolumeMount{}
volMountMap[kubeadmconstants.KubeControllerManager]["k8s-certs"] = v1.VolumeMount{
Name: "k8s-certs",
MountPath: testCertsDir,
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeControllerManager]["ca-certs"] = v1.VolumeMount{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeControllerManager]["kubeconfig"] = v1.VolumeMount{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/controller-manager.conf",
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeScheduler] = map[string]v1.VolumeMount{}
volMountMap[kubeadmconstants.KubeScheduler]["kubeconfig"] = v1.VolumeMount{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/scheduler.conf",
ReadOnly: true,
}
volMap2 := make(map[string]map[string]v1.Volume)
volMap2[kubeadmconstants.KubeAPIServer] = map[string]v1.Volume{}
volMap2[kubeadmconstants.KubeAPIServer]["k8s-certs"] = v1.Volume{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: testCertsDir,
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeAPIServer]["ca-certs"] = v1.Volume{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeAPIServer]["etcd-certs-0"] = v1.Volume{
Name: "etcd-certs-0",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/certs/etcd",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeAPIServer]["etcd-certs-1"] = v1.Volume{
Name: "etcd-certs-1",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/etcd/certs",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeControllerManager] = map[string]v1.Volume{}
volMap2[kubeadmconstants.KubeControllerManager]["k8s-certs"] = v1.Volume{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: testCertsDir,
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeControllerManager]["ca-certs"] = v1.Volume{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeControllerManager]["kubeconfig"] = v1.Volume{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/controller-manager.conf",
Type: &hostPathFileOrCreate,
},
},
}
volMap2[kubeadmconstants.KubeScheduler] = map[string]v1.Volume{}
volMap2[kubeadmconstants.KubeScheduler]["kubeconfig"] = v1.Volume{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/scheduler.conf",
Type: &hostPathFileOrCreate,
},
},
}
volMountMap2 := make(map[string]map[string]v1.VolumeMount)
volMountMap2[kubeadmconstants.KubeAPIServer] = map[string]v1.VolumeMount{}
volMountMap2[kubeadmconstants.KubeAPIServer]["k8s-certs"] = v1.VolumeMount{
Name: "k8s-certs",
MountPath: testCertsDir,
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeAPIServer]["ca-certs"] = v1.VolumeMount{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeAPIServer]["etcd-certs-0"] = v1.VolumeMount{
Name: "etcd-certs-0",
MountPath: "/etc/certs/etcd",
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeAPIServer]["etcd-certs-1"] = v1.VolumeMount{
Name: "etcd-certs-1",
MountPath: "/var/lib/etcd/certs",
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeControllerManager] = map[string]v1.VolumeMount{}
volMountMap2[kubeadmconstants.KubeControllerManager]["k8s-certs"] = v1.VolumeMount{
Name: "k8s-certs",
MountPath: testCertsDir,
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeControllerManager]["ca-certs"] = v1.VolumeMount{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeControllerManager]["kubeconfig"] = v1.VolumeMount{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/controller-manager.conf",
ReadOnly: true,
}
volMountMap2[kubeadmconstants.KubeScheduler] = map[string]v1.VolumeMount{}
volMountMap2[kubeadmconstants.KubeScheduler]["kubeconfig"] = v1.VolumeMount{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/scheduler.conf",
ReadOnly: true,
}
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
vol map[string]map[string]v1.Volume
volMount map[string]map[string]v1.VolumeMount
}{
{
// Default mounts only; no external etcd configured
cfg: &kubeadmapi.MasterConfiguration{
CertificatesDir: testCertsDir,
Etcd: kubeadmapi.Etcd{},
},
vol: volMap,
volMount: volMountMap,
},
{
// Should ignore files in /etc/ssl/certs and in CertificatesDir
cfg: &kubeadmapi.MasterConfiguration{
CertificatesDir: testCertsDir,
Etcd: kubeadmapi.Etcd{
Endpoints: []string{"foo"},
CAFile: "/etc/certs/etcd/my-etcd-ca.crt",
CertFile: testCertsDir + "/etcd/my-etcd.crt",
KeyFile: "/var/lib/etcd/certs/my-etcd.key",
},
},
vol: volMap2,
volMount: volMountMap2,
},
}
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
// set up tmp caCertsPkiVolumePath for testing
caCertsPkiVolumePath = fmt.Sprintf("%s/etc/pki", tmpdir)
defer func() { caCertsPkiVolumePath = "/etc/pki" }()
for _, rt := range tests {
mounts := getHostPathVolumesForTheControlPlane(rt.cfg)
// Avoid unit test errors when the flexvolume is mounted
if _, ok := mounts.volumes[kubeadmconstants.KubeControllerManager][flexvolumeDirVolumeName]; ok {
delete(mounts.volumes[kubeadmconstants.KubeControllerManager], flexvolumeDirVolumeName)
}
if _, ok := mounts.volumeMounts[kubeadmconstants.KubeControllerManager][flexvolumeDirVolumeName]; ok {
delete(mounts.volumeMounts[kubeadmconstants.KubeControllerManager], flexvolumeDirVolumeName)
}
if _, ok := mounts.volumeMounts[kubeadmconstants.KubeControllerManager][cloudConfigVolumeName]; ok {
delete(mounts.volumeMounts[kubeadmconstants.KubeControllerManager], cloudConfigVolumeName)
}
if !reflect.DeepEqual(mounts.volumes, rt.vol) {
t.Errorf(
"failed getHostPathVolumesForTheControlPlane:\n\texpected: %v\n\t actual: %v",
rt.vol,
mounts.volumes,
)
}
if !reflect.DeepEqual(mounts.volumeMounts, rt.volMount) {
t.Errorf(
"failed getHostPathVolumesForTheControlPlane:\n\texpected: %v\n\t actual: %v",
rt.volMount,
mounts.volumeMounts,
)
}
}
}
func TestAddExtraHostPathMounts(t *testing.T) {
mounts := newControlPlaneHostPathMounts()
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
hostPathFileOrCreate := v1.HostPathFileOrCreate
vols := []v1.Volume{
{
Name: "foo",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp/foo",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "bar",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp/bar",
Type: &hostPathFileOrCreate,
},
},
},
}
volMounts := []v1.VolumeMount{
{
Name: "foo",
MountPath: "/tmp/foo",
ReadOnly: true,
},
{
Name: "bar",
MountPath: "/tmp/bar",
ReadOnly: true,
},
}
mounts.AddHostPathMounts("component", vols, volMounts)
hostPathMounts := []kubeadmapi.HostPathMount{
{
Name: "foo",
HostPath: "/tmp/qux",
MountPath: "/tmp/qux",
},
}
mounts.AddExtraHostPathMounts("component", hostPathMounts, true, &hostPathDirectoryOrCreate)
if _, ok := mounts.volumes["component"]["foo"]; !ok {
t.Errorf("Expected to find volume %q", "foo")
}
vol, _ := mounts.volumes["component"]["foo"]
if vol.Name != "foo" {
t.Errorf("Expected volume name %q", "foo")
}
if vol.HostPath.Path != "/tmp/qux" {
t.Errorf("Expected host path %q", "/tmp/qux")
}
if _, ok := mounts.volumeMounts["component"]["foo"]; !ok {
t.Errorf("Expected to find volume mount %q", "foo")
}
volMount, _ := mounts.volumeMounts["component"]["foo"]
if volMount.Name != "foo" {
t.Errorf("Expected volume mount name %q", "foo")
}
if volMount.MountPath != "/tmp/qux" {
t.Errorf("Expected container path %q", "/tmp/qux")
}
}

View File

@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["local_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/test:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["local.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/staticpod:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kubeadm/app/phases/etcd/spec:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -0,0 +1,76 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"fmt"
"k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
)
const (
etcdVolumeName = "etcd"
)
// CreateLocalEtcdStaticPodManifestFile will write the local etcd static pod manifest file.
func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
// gets etcd StaticPodSpec, actualized for the current MasterConfiguration
spec := GetEtcdPodSpec(cfg)
// writes etcd StaticPod to disk
if err := staticpodutil.WriteStaticPodToDisk(kubeadmconstants.Etcd, manifestDir, spec); err != nil {
return err
}
fmt.Printf("[etcd] Wrote Static Pod manifest for a local etcd instance to %q\n", kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.Etcd, manifestDir))
return nil
}
// GetEtcdPodSpec returns the etcd static Pod actualized to the context of the current MasterConfiguration
// NB. the GetEtcdPodSpec method holds the information about how kubeadm creates etcd static pod manifests.
func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod {
pathType := v1.HostPathDirectoryOrCreate
etcdMounts := map[string]v1.Volume{
etcdVolumeName: staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.DataDir, &pathType),
}
return staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.Etcd,
Command: getEtcdCommand(cfg),
Image: images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image),
// Mount the etcd datadir path read-write so etcd can store data in a more persistent manner
VolumeMounts: []v1.VolumeMount{staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.DataDir, false)},
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.Etcd, 2379, "/health", v1.URISchemeHTTP),
}, etcdMounts)
}
// getEtcdCommand builds the right etcd command from the given config object
func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string {
defaultArguments := map[string]string{
"listen-client-urls": "http://127.0.0.1:2379",
"advertise-client-urls": "http://127.0.0.1:2379",
"data-dir": cfg.Etcd.DataDir,
}
command := []string{"etcd"}
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.Etcd.ExtraArgs)...)
return command
}
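// exampleEtcdCommand is an illustrative sketch, not part of the original source. It shows how
// user-supplied ExtraArgs override the corresponding default flags when getEtcdCommand builds
// the command line; the addresses and data dir below are placeholder values.
func exampleEtcdCommand() []string {
	cfg := &kubeadmapi.MasterConfiguration{
		Etcd: kubeadmapi.Etcd{
			DataDir: "/var/lib/etcd",
			ExtraArgs: map[string]string{
				"listen-client-urls":    "http://10.0.1.10:2379",
				"advertise-client-urls": "http://10.0.1.10:2379",
			},
		},
	}
	// Yields: etcd --listen-client-urls=http://10.0.1.10:2379
	//              --advertise-client-urls=http://10.0.1.10:2379 --data-dir=/var/lib/etcd
	return getEtcdCommand(cfg)
}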

View File

@ -0,0 +1,125 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"os"
"path/filepath"
"reflect"
"sort"
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
func TestGetEtcdPodSpec(t *testing.T) {
// Creates a Master Configuration
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.7.0",
}
// Executes GetEtcdPodSpec
spec := GetEtcdPodSpec(cfg)
// Assert the spec refers to the right pod
if spec.Spec.Containers[0].Name != kubeadmconstants.Etcd {
t.Errorf("getKubeConfigSpecs spec for etcd contains pod %s, expectes %s", spec.Spec.Containers[0].Name, kubeadmconstants.Etcd)
}
}
func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Creates a Master Configuration
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.7.0",
}
// Execute CreateLocalEtcdStaticPodManifestFile
manifestPath := filepath.Join(tmpdir, kubeadmconstants.ManifestsSubDirName)
err := CreateLocalEtcdStaticPodManifestFile(manifestPath, cfg)
if err != nil {
t.Errorf("Error executing CreateEtcdStaticPodManifestFile: %v", err)
}
// Assert expected files are there
testutil.AssertFilesCount(t, manifestPath, 1)
testutil.AssertFileExists(t, manifestPath, kubeadmconstants.Etcd+".yaml")
}
func TestGetEtcdCommand(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected []string
}{
{
cfg: &kubeadmapi.MasterConfiguration{
Etcd: kubeadmapi.Etcd{DataDir: "/var/lib/etcd"},
},
expected: []string{
"etcd",
"--listen-client-urls=http://127.0.0.1:2379",
"--advertise-client-urls=http://127.0.0.1:2379",
"--data-dir=/var/lib/etcd",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
Etcd: kubeadmapi.Etcd{
DataDir: "/var/lib/etcd",
ExtraArgs: map[string]string{
"listen-client-urls": "http://10.0.1.10:2379",
"advertise-client-urls": "http://10.0.1.10:2379",
},
},
},
expected: []string{
"etcd",
"--listen-client-urls=http://10.0.1.10:2379",
"--advertise-client-urls=http://10.0.1.10:2379",
"--data-dir=/var/lib/etcd",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
Etcd: kubeadmapi.Etcd{DataDir: "/etc/foo"},
},
expected: []string{
"etcd",
"--listen-client-urls=http://127.0.0.1:2379",
"--advertise-client-urls=http://127.0.0.1:2379",
"--data-dir=/etc/foo",
},
},
}
for _, rt := range tests {
actual := getEtcdCommand(rt.cfg)
sort.Strings(actual)
sort.Strings(rt.expected)
if !reflect.DeepEqual(actual, rt.expected) {
t.Errorf("failed getEtcdCommand:\nexpected:\n%v\nsaw:\n%v", rt.expected, actual)
}
}
}

View File

@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"spec.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/spec",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package spec

View File

@ -0,0 +1,205 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was collated from types used in:
// https://github.com/coreos/etcd-operator/tree/e7f18696bbdc127fa028a99ca8166a8519749328/pkg/apis/etcd/v1beta2.
// When kubeadm moves to its own repo and controls its own dependencies,
// this file will no longer be needed.
package spec
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
// CRDResourceKind is the CRD resource kind
CRDResourceKind = "EtcdCluster"
// CRDResourcePlural is the CRD resource plural
CRDResourcePlural = "etcdclusters"
groupName = "etcd.database.coreos.com"
)
var (
// SchemeBuilder is a scheme builder
SchemeBuilder = runtime.NewSchemeBuilder(AddKnownTypes)
// AddToScheme adds to the scheme
AddToScheme = SchemeBuilder.AddToScheme
// SchemeGroupVersion is the scheme version
SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: "v1beta2"}
// CRDName is the name of the CRD
CRDName = CRDResourcePlural + "." + groupName
)
// Resource gets an EtcdCluster GroupResource for a specified resource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// AddKnownTypes adds the set of types defined in this package to the supplied scheme.
func AddKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion,
&EtcdCluster{},
&EtcdClusterList{},
)
metav1.AddToGroupVersion(s, SchemeGroupVersion)
return nil
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EtcdClusterList is a list of etcd clusters.
type EtcdClusterList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
Items []EtcdCluster `json:"items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EtcdCluster represents an etcd cluster
type EtcdCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterSpec `json:"spec"`
}
// ClusterSpec represents a cluster spec
type ClusterSpec struct {
// Size is the expected size of the etcd cluster.
// The etcd-operator will eventually make the size of the running
// cluster equal to the expected size.
// The valid range of the size is from 1 to 7.
Size int `json:"size"`
// BaseImage is the base etcd image name that will be used to launch
// etcd clusters. This is useful for private registries, etc.
//
// If image is not set, default is quay.io/coreos/etcd
BaseImage string `json:"baseImage"`
// Version is the expected version of the etcd cluster.
// The etcd-operator will eventually make the etcd cluster version
// equal to the expected version.
//
// The version must follow the [semver]( http://semver.org) format, for example "3.1.8".
// Only etcd released versions are supported: https://github.com/coreos/etcd/releases
//
// If version is not set, default is "3.1.8".
Version string `json:"version,omitempty"`
// Paused is to pause the control of the operator for the etcd cluster.
Paused bool `json:"paused,omitempty"`
// Pod defines the policy to create pod for the etcd pod.
//
// Updating Pod does not take effect on any existing etcd pods.
Pod *PodPolicy `json:"pod,omitempty"`
// SelfHosted determines if the etcd cluster is used for a self-hosted
// Kubernetes cluster.
//
// SelfHosted is a cluster initialization configuration. It cannot be updated.
SelfHosted *SelfHostedPolicy `json:"selfHosted,omitempty"`
// etcd cluster TLS configuration
TLS *TLSPolicy `json:"TLS,omitempty"`
}
// PodPolicy defines the policy to create pod for the etcd container.
type PodPolicy struct {
// Labels specifies the labels to attach to pods the operator creates for the
// etcd cluster.
// "app" and "etcd_*" labels are reserved for the internal use of the etcd operator.
// Do not overwrite them.
Labels map[string]string `json:"labels,omitempty"`
// NodeSelector specifies a map of key-value pairs. For the pod to be eligible
// to run on a node, the node must have each of the indicated key-value pairs as
// labels.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// AntiAffinity determines if the etcd-operator tries to avoid putting
// the etcd members in the same cluster onto the same node.
AntiAffinity bool `json:"antiAffinity,omitempty"`
// Resources is the resource requirements for the etcd container.
// This field cannot be updated once the cluster is created.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// Tolerations specifies the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// List of environment variables to set in the etcd container.
// This is used to configure the etcd process. The etcd cluster cannot be created when
// bad environment variables are provided. Do not overwrite any flags used to
// bootstrap the cluster (for example `--initial-cluster` flag).
// This field cannot be updated.
EtcdEnv []v1.EnvVar `json:"etcdEnv,omitempty"`
// By default, kubernetes will mount a service account token into the etcd pods.
// AutomountServiceAccountToken indicates whether pods running with the service account should have an API token automatically mounted.
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
}
// TLSPolicy defines the TLS policy of an etcd cluster
type TLSPolicy struct {
// StaticTLS enables the user to generate static x509 certificates and keys,
// put them into Kubernetes secrets, and specify them into here.
Static *StaticTLS `json:"static,omitempty"`
}
// StaticTLS represents static TLS
type StaticTLS struct {
// Member contains secrets containing TLS certs used by each etcd member pod.
Member *MemberSecret `json:"member,omitempty"`
// OperatorSecret is the secret containing TLS certs used by operator to
// talk securely to this cluster.
OperatorSecret string `json:"operatorSecret,omitempty"`
}
// MemberSecret represents a member secret
type MemberSecret struct {
// PeerSecret is the secret containing TLS certs used by each etcd member pod
// for the communication between etcd peers.
PeerSecret string `json:"peerSecret,omitempty"`
// ServerSecret is the secret containing TLS certs used by each etcd member pod
// for the communication between etcd server and its clients.
ServerSecret string `json:"serverSecret,omitempty"`
}
// SelfHostedPolicy represents a self-hosted policy
type SelfHostedPolicy struct {
// BootMemberClientEndpoint specifies a bootstrap member for the cluster.
// If there is no bootstrap member, a completely new cluster will be created.
// The boot member will be removed from the cluster once the self-hosted cluster
// is set up successfully.
BootMemberClientEndpoint string `json:"bootMemberClientEndpoint,omitempty"`
// SkipBootMemberRemoval specifies whether the removal of the bootstrap member
// should be skipped. By default the operator will automatically remove the
// bootstrap member from the new cluster - this happens during the pivot
// procedure and is the first step of decommissioning the bootstrap member.
// If unspecified, the default is `false`. If set to `true`, you are
// expected to remove the boot member yourself from the etcd cluster.
SkipBootMemberRemoval bool `json:"skipBootMemberRemoval,omitempty"`
}
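// exampleEtcdCluster is an illustrative sketch, not part of the original source. It shows how
// the collated types above fit together; the object name, size and version are placeholder values.
func exampleEtcdCluster() *EtcdCluster {
	return &EtcdCluster{
		TypeMeta: metav1.TypeMeta{
			Kind:       CRDResourceKind,
			APIVersion: SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{Name: "example-etcd-cluster"},
		Spec: ClusterSpec{
			Size:    3,
			Version: "3.1.8",
		},
	}
}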

View File

@ -0,0 +1,267 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package spec
import (
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = *in
if in.Pod != nil {
in, out := &in.Pod, &out.Pod
if *in == nil {
*out = nil
} else {
*out = new(PodPolicy)
(*in).DeepCopyInto(*out)
}
}
if in.SelfHosted != nil {
in, out := &in.SelfHosted, &out.SelfHosted
if *in == nil {
*out = nil
} else {
*out = new(SelfHostedPolicy)
**out = **in
}
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
if *in == nil {
*out = nil
} else {
*out = new(TLSPolicy)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
func (in *ClusterSpec) DeepCopy() *ClusterSpec {
if in == nil {
return nil
}
out := new(ClusterSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdCluster) DeepCopyInto(out *EtcdCluster) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdCluster.
func (in *EtcdCluster) DeepCopy() *EtcdCluster {
if in == nil {
return nil
}
out := new(EtcdCluster)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EtcdCluster) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdClusterList) DeepCopyInto(out *EtcdClusterList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]EtcdCluster, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdClusterList.
func (in *EtcdClusterList) DeepCopy() *EtcdClusterList {
if in == nil {
return nil
}
out := new(EtcdClusterList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EtcdClusterList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemberSecret) DeepCopyInto(out *MemberSecret) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberSecret.
func (in *MemberSecret) DeepCopy() *MemberSecret {
if in == nil {
return nil
}
out := new(MemberSecret)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodPolicy) DeepCopyInto(out *PodPolicy) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EtcdEnv != nil {
in, out := &in.EtcdEnv, &out.EtcdEnv
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPolicy.
func (in *PodPolicy) DeepCopy() *PodPolicy {
if in == nil {
return nil
}
out := new(PodPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfHostedPolicy) DeepCopyInto(out *SelfHostedPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfHostedPolicy.
func (in *SelfHostedPolicy) DeepCopy() *SelfHostedPolicy {
if in == nil {
return nil
}
out := new(SelfHostedPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StaticTLS) DeepCopyInto(out *StaticTLS) {
*out = *in
if in.Member != nil {
in, out := &in.Member, &out.Member
if *in == nil {
*out = nil
} else {
*out = new(MemberSecret)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticTLS.
func (in *StaticTLS) DeepCopy() *StaticTLS {
if in == nil {
return nil
}
out := new(StaticTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSPolicy) DeepCopyInto(out *TLSPolicy) {
*out = *in
if in.Static != nil {
in, out := &in.Static, &out.Static
if *in == nil {
*out = nil
} else {
*out = new(StaticTLS)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSPolicy.
func (in *TLSPolicy) DeepCopy() *TLSPolicy {
if in == nil {
return nil
}
out := new(TLSPolicy)
in.DeepCopyInto(out)
return out
}

View File

@ -0,0 +1,57 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"kubeconfig.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["kubeconfig_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//cmd/kubeadm/test/certs:go_default_library",
"//cmd/kubeadm/test/kubeconfig:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
],
)

View File

@ -0,0 +1,35 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeconfig
/*
PHASE: KUBECONFIG
INPUTS:
From MasterConfiguration
The Master API Server endpoint (AdvertiseAddress + BindPort) is required so the KubeConfig file knows where to find the master
The KubernetesDir path is required for knowing where to put the KubeConfig files
The PKIPath is required for knowing where all certificates should be stored
OUTPUTS:
Files to KubernetesDir (default /etc/kubernetes):
- admin.conf
- kubelet.conf
- scheduler.conf
- controller-manager.conf
*/
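// Illustrative usage sketch, not part of the original source; the output directory and the
// configuration fields shown are placeholders. The phase described above is typically driven
// through CreateInitKubeConfigFiles (defined in kubeconfig.go):
//
//	cfg := &kubeadmapi.MasterConfiguration{NodeName: "master-0", CertificatesDir: "/etc/kubernetes/pki"}
//	if err := CreateInitKubeConfigFiles("/etc/kubernetes", cfg); err != nil {
//		fmt.Printf("[kubeconfig] phase failed: %v\n", err)
//	}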

View File

@ -0,0 +1,335 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeconfig
import (
"bytes"
"crypto/x509"
"fmt"
"io"
"os"
"path/filepath"
"crypto/rsa"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
)
// clientCertAuth struct holds info required to build a client certificate to provide authentication info in a kubeconfig object
type clientCertAuth struct {
CAKey *rsa.PrivateKey
Organizations []string
}
// tokenAuth struct holds info required to use a token to provide authentication info in a kubeconfig object
type tokenAuth struct {
Token string
}
// kubeConfigSpec struct holds info required to build a KubeConfig object
type kubeConfigSpec struct {
CACert *x509.Certificate
APIServer string
ClientName string
TokenAuth *tokenAuth
ClientCertAuth *clientCertAuth
}
// CreateInitKubeConfigFiles will create and write to disk all kubeconfig files necessary in the kubeadm init phase
// to establish the control plane, including also the admin kubeconfig file.
// If kubeconfig files already exist, they are used only if evaluated equal; otherwise an error is returned.
func CreateInitKubeConfigFiles(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createKubeConfigFiles(
outDir,
cfg,
kubeadmconstants.AdminKubeConfigFileName,
kubeadmconstants.KubeletKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
kubeadmconstants.SchedulerKubeConfigFileName,
)
}
// CreateAdminKubeConfigFile creates a kubeconfig file for the admin to use and for kubeadm itself.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateAdminKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createKubeConfigFiles(outDir, cfg, kubeadmconstants.AdminKubeConfigFileName)
}
// CreateKubeletKubeConfigFile creates a kubeconfig file for the Kubelet to use.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateKubeletKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createKubeConfigFiles(outDir, cfg, kubeadmconstants.KubeletKubeConfigFileName)
}
// CreateControllerManagerKubeConfigFile creates a kubeconfig file for the ControllerManager to use.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateControllerManagerKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createKubeConfigFiles(outDir, cfg, kubeadmconstants.ControllerManagerKubeConfigFileName)
}
// CreateSchedulerKubeConfigFile creates a kubeconfig file for the Scheduler to use.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateSchedulerKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createKubeConfigFiles(outDir, cfg, kubeadmconstants.SchedulerKubeConfigFileName)
}
// createKubeConfigFiles creates all the requested kubeconfig files.
// If kubeconfig files already exist, they are used only if evaluated equal; otherwise an error is returned.
func createKubeConfigFiles(outDir string, cfg *kubeadmapi.MasterConfiguration, kubeConfigFileNames ...string) error {
// gets the KubeConfigSpecs, actualized for the current MasterConfiguration
specs, err := getKubeConfigSpecs(cfg)
if err != nil {
return err
}
for _, kubeConfigFileName := range kubeConfigFileNames {
// retrieves the KubeConfigSpec for the given kubeConfigFileName
spec, exists := specs[kubeConfigFileName]
if !exists {
return fmt.Errorf("couldn't retrive KubeConfigSpec for %s", kubeConfigFileName)
}
// builds the KubeConfig object
config, err := buildKubeConfigFromSpec(spec)
if err != nil {
return err
}
// writes the KubeConfig to disk if it doesn't exist
if err = createKubeConfigFileIfNotExists(outDir, kubeConfigFileName, config); err != nil {
return err
}
}
return nil
}
// getKubeConfigSpecs returns all KubeConfigSpecs actualized to the context of the current MasterConfiguration
// NB. this method holds the information about how kubeadm creates kubeconfig files.
func getKubeConfigSpecs(cfg *kubeadmapi.MasterConfiguration) (map[string]*kubeConfigSpec, error) {
caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return nil, fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
}
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
if err != nil {
return nil, err
}
var kubeConfigSpec = map[string]*kubeConfigSpec{
kubeadmconstants.AdminKubeConfigFileName: {
CACert: caCert,
APIServer: masterEndpoint,
ClientName: "kubernetes-admin",
ClientCertAuth: &clientCertAuth{
CAKey: caKey,
Organizations: []string{kubeadmconstants.MastersGroup},
},
},
kubeadmconstants.KubeletKubeConfigFileName: {
CACert: caCert,
APIServer: masterEndpoint,
ClientName: fmt.Sprintf("system:node:%s", cfg.NodeName),
ClientCertAuth: &clientCertAuth{
CAKey: caKey,
Organizations: []string{kubeadmconstants.NodesGroup},
},
},
kubeadmconstants.ControllerManagerKubeConfigFileName: {
CACert: caCert,
APIServer: masterEndpoint,
ClientName: kubeadmconstants.ControllerManagerUser,
ClientCertAuth: &clientCertAuth{
CAKey: caKey,
},
},
kubeadmconstants.SchedulerKubeConfigFileName: {
CACert: caCert,
APIServer: masterEndpoint,
ClientName: kubeadmconstants.SchedulerUser,
ClientCertAuth: &clientCertAuth{
CAKey: caKey,
},
},
}
return kubeConfigSpec, nil
}
// buildKubeConfigFromSpec creates a kubeconfig object for the given kubeConfigSpec
func buildKubeConfigFromSpec(spec *kubeConfigSpec) (*clientcmdapi.Config, error) {
// If this kubeconfig should use a token
if spec.TokenAuth != nil {
// create a kubeconfig with a token
return kubeconfigutil.CreateWithToken(
spec.APIServer,
"kubernetes",
spec.ClientName,
certutil.EncodeCertPEM(spec.CACert),
spec.TokenAuth.Token,
), nil
}
// otherwise, create a client cert
clientCertConfig := certutil.Config{
CommonName: spec.ClientName,
Organization: spec.ClientCertAuth.Organizations,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
clientCert, clientKey, err := pkiutil.NewCertAndKey(spec.CACert, spec.ClientCertAuth.CAKey, clientCertConfig)
if err != nil {
return nil, fmt.Errorf("failure while creating %s client certificate: %v", spec.ClientName, err)
}
// create a kubeconfig with the client certs
return kubeconfigutil.CreateWithCerts(
spec.APIServer,
"kubernetes",
spec.ClientName,
certutil.EncodeCertPEM(spec.CACert),
certutil.EncodePrivateKeyPEM(clientKey),
certutil.EncodeCertPEM(clientCert),
), nil
}
// createKubeConfigFileIfNotExists saves the KubeConfig object into a file if there isn't any file at the given path.
// If there already is a KubeConfig file at the given path, kubeadm tries to load it and checks whether the values in the
// existing and the expected config are equal. If they are, kubeadm just skips writing the file as it's up-to-date;
// but if the file has stale content or isn't a kubeconfig file at all, this function returns an error.
func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmdapi.Config) error {
kubeConfigFilePath := filepath.Join(outDir, filename)
// Check if the file exists, and if it doesn't, just write it to disk
if _, err := os.Stat(kubeConfigFilePath); os.IsNotExist(err) {
err = kubeconfigutil.WriteToDisk(kubeConfigFilePath, config)
if err != nil {
return fmt.Errorf("failed to save kubeconfig file %s on disk: %v", kubeConfigFilePath, err)
}
fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filename)
return nil
}
// The kubeconfig already exists, let's check if it has got the same CA and server URL
currentConfig, err := clientcmd.LoadFromFile(kubeConfigFilePath)
if err != nil {
return fmt.Errorf("failed to load kubeconfig file %s that already exists on disk: %v", kubeConfigFilePath, err)
}
expectedCtx := config.CurrentContext
expectedCluster := config.Contexts[expectedCtx].Cluster
currentCtx := currentConfig.CurrentContext
currentCluster := currentConfig.Contexts[currentCtx].Cluster
// If the current CA cert on disk doesn't match the expected CA cert, error out because we have a file, but it's stale
if !bytes.Equal(currentConfig.Clusters[currentCluster].CertificateAuthorityData, config.Clusters[expectedCluster].CertificateAuthorityData) {
return fmt.Errorf("a kubeconfig file %q exists already but has got the wrong CA cert", kubeConfigFilePath)
}
// If the current API Server location on disk doesn't match the expected API server, error out because we have a file, but it's stale
if currentConfig.Clusters[currentCluster].Server != config.Clusters[expectedCluster].Server {
return fmt.Errorf("a kubeconfig file %q exists already but has got the wrong API Server URL", kubeConfigFilePath)
}
// kubeadm doesn't validate the existing kubeconfig file more than this (kubeadm trusts the client certs to be valid)
// Basically, if we find a kubeconfig file with the same path; the same CA cert and the same server URL;
// kubeadm thinks those files are equal and doesn't bother writing a new file
fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", filename)
return nil
}
// WriteKubeConfigWithClientCert writes a kubeconfig file - with a client certificate as authentication info - to the given writer.
func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.MasterConfiguration, clientName string) error {
// creates the KubeConfigSpecs, actualized for the current MasterConfiguration
caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
}
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
if err != nil {
return err
}
spec := &kubeConfigSpec{
ClientName: clientName,
APIServer: masterEndpoint,
CACert: caCert,
ClientCertAuth: &clientCertAuth{
CAKey: caKey,
},
}
return writeKubeConfigFromSpec(out, spec)
}
// WriteKubeConfigWithToken writes a kubeconfig file - with a token as client authentication info - to the given writer.
func WriteKubeConfigWithToken(out io.Writer, cfg *kubeadmapi.MasterConfiguration, clientName, token string) error {
// creates the KubeConfigSpecs, actualized for the current MasterConfiguration
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
}
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
if err != nil {
return err
}
spec := &kubeConfigSpec{
ClientName: clientName,
APIServer: masterEndpoint,
CACert: caCert,
TokenAuth: &tokenAuth{
Token: token,
},
}
return writeKubeConfigFromSpec(out, spec)
}
// writeKubeConfigFromSpec creates a kubeconfig object from a kubeConfigSpec and writes it to the given writer.
func writeKubeConfigFromSpec(out io.Writer, spec *kubeConfigSpec) error {
// builds the KubeConfig object
config, err := buildKubeConfigFromSpec(spec)
if err != nil {
return err
}
// serializes the KubeConfig object and writes it to the given writer
configBytes, err := clientcmd.Write(*config)
if err != nil {
return fmt.Errorf("failure while serializing admin kubeconfig: %v", err)
}
fmt.Fprintln(out, string(configBytes))
return nil
}
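// Illustrative sketch only (not part of kubeadm): a minimal example of how the helpers in this
// file might be driven during master initialization, assuming the CA already exists under
// CertificatesDir and that /etc/kubernetes is the output directory; the concrete field values
// below are assumptions made up for the example.
//
//	cfg := &kubeadmapi.MasterConfiguration{
//		API:             kubeadmapi.API{AdvertiseAddress: "10.0.0.1", BindPort: 6443},
//		CertificatesDir: "/etc/kubernetes/pki",
//		NodeName:        "master-0",
//	}
//	// Writes admin.conf, kubelet.conf, controller-manager.conf and scheduler.conf,
//	// reusing any existing file that already matches the CA and API server URL.
//	if err := CreateInitKubeConfigFiles("/etc/kubernetes", cfg); err != nil {
//		return err
//	}
//	// Streams an additional, token-authenticated kubeconfig to the given writer.
//	return WriteKubeConfigWithToken(os.Stdout, cfg, "bootstrap-user", "abcdef.0123456789abcdef")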

View File

@ -0,0 +1,440 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeconfig
import (
"bytes"
"crypto/rsa"
"crypto/x509"
"fmt"
"io"
"os"
"reflect"
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs"
kubeconfigtestutil "k8s.io/kubernetes/cmd/kubeadm/test/kubeconfig"
)
func TestGetKubeConfigSpecsFailsIfCADoesntExists(t *testing.T) {
// Create temp folder for the test case (without a CA)
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Creates a Master Configuration pointing to the pkidir folder
cfg := &kubeadmapi.MasterConfiguration{
CertificatesDir: tmpdir,
}
// Executes getKubeConfigSpecs
if _, err := getKubeConfigSpecs(cfg); err == nil {
t.Error("getKubeConfigSpecs didnt failed when expected")
}
}
func TestGetKubeConfigSpecs(t *testing.T) {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Adds a pki folder with a ca certs to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Creates a Master Configuration pointing to the pkidir folder
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
CertificatesDir: pkidir,
NodeName: "valid-node-name",
}
// Executes getKubeConfigSpecs
specs, err := getKubeConfigSpecs(cfg)
if err != nil {
t.Fatal("getKubeConfigSpecs failed!")
}
var assertions = []struct {
kubeConfigFile string
clientName string
organizations []string
}{
{
kubeConfigFile: kubeadmconstants.AdminKubeConfigFileName,
clientName: "kubernetes-admin",
organizations: []string{kubeadmconstants.MastersGroup},
},
{
kubeConfigFile: kubeadmconstants.KubeletKubeConfigFileName,
clientName: fmt.Sprintf("system:node:%s", cfg.NodeName),
organizations: []string{kubeadmconstants.NodesGroup},
},
{
kubeConfigFile: kubeadmconstants.ControllerManagerKubeConfigFileName,
clientName: kubeadmconstants.ControllerManagerUser,
},
{
kubeConfigFile: kubeadmconstants.SchedulerKubeConfigFileName,
clientName: kubeadmconstants.SchedulerUser,
},
}
for _, assertion := range assertions {
// assert the spec for the kubeConfigFile exists
if spec, ok := specs[assertion.kubeConfigFile]; ok {
// Assert clientName
if spec.ClientName != assertion.clientName {
t.Errorf("getKubeConfigSpecs for %s clientName is %s, expected %s", assertion.kubeConfigFile, spec.ClientName, assertion.clientName)
}
// Assert Organizations
if spec.ClientCertAuth == nil || !reflect.DeepEqual(spec.ClientCertAuth.Organizations, assertion.organizations) {
t.Errorf("getKubeConfigSpecs for %s Organizations is %v, expected %v", assertion.kubeConfigFile, spec.ClientCertAuth.Organizations, assertion.organizations)
}
// Asserts MasterConfiguration values injected into spec
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
if err != nil {
t.Error(err)
}
if spec.APIServer != masterEndpoint {
t.Errorf("getKubeConfigSpecs didn't injected cfg.APIServer endpoint into spec for %s", assertion.kubeConfigFile)
}
// Asserts CA certs and CA keys loaded into specs
if spec.CACert == nil {
t.Errorf("getKubeConfigSpecs didn't loaded CACert into spec for %s!", assertion.kubeConfigFile)
}
if spec.ClientCertAuth == nil || spec.ClientCertAuth.CAKey == nil {
t.Errorf("getKubeConfigSpecs didn't loaded CAKey into spec for %s!", assertion.kubeConfigFile)
}
} else {
t.Errorf("getKubeConfigSpecs didn't create spec for %s ", assertion.kubeConfigFile)
}
}
}
func TestBuildKubeConfigFromSpecWithClientAuth(t *testing.T) {
// Creates a CA
caCert, caKey := certstestutil.SetupCertificateAuthorithy(t)
// Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a ClientAuth
config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myClientName", "myOrg1", "myOrg2")
// Asserts spec data are propagated to the kubeconfig
kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithClientCert(t, config, caCert, "myClientName", "myOrg1", "myOrg2")
}
func TestBuildKubeConfigFromSpecWithTokenAuth(t *testing.T) {
// Creates a CA
caCert, _ := certstestutil.SetupCertificateAuthorithy(t)
// Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a Token
config := setupdKubeConfigWithTokenAuth(t, caCert, "https://1.2.3.4:1234", "myClientName", "123456")
// Asserts spec data are propagated to the kubeconfig
kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithToken(t, config, "myClientName", "123456")
}
func TestCreateKubeConfigFileIfNotExists(t *testing.T) {
// Creates two CAs
caCert, caKey := certstestutil.SetupCertificateAuthorithy(t)
anotherCaCert, anotherCaKey := certstestutil.SetupCertificateAuthorithy(t)
// build kubeconfigs (to be used to test kubeconfigs equality/not equality)
config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myOrg1", "myOrg2")
configWithAnotherClusterCa := setupdKubeConfigWithClientAuth(t, anotherCaCert, anotherCaKey, "https://1.2.3.4:1234", "myOrg1", "myOrg2")
configWithAnotherClusterAddress := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://3.4.5.6:3456", "myOrg1", "myOrg2")
var tests = []struct {
existingKubeConfig *clientcmdapi.Config
kubeConfig *clientcmdapi.Config
expectedError bool
}{
{ // if there is no existing KubeConfig, creates the kubeconfig
kubeConfig: config,
},
{ // if KubeConfig is equal to the existingKubeConfig (i.e. refers to the same cluster), use the existing file (test idempotency)
existingKubeConfig: config,
kubeConfig: config,
},
{ // if KubeConfig is not equal to the existingKubeConfig (refers to another cluster, with a different CA), raise an error
existingKubeConfig: config,
kubeConfig: configWithAnotherClusterCa,
expectedError: true,
},
{ // if KubeConfig is not equal to the existingKubeConfig (refers to another cluster, with a different address), raise an error
existingKubeConfig: config,
kubeConfig: configWithAnotherClusterAddress,
expectedError: true,
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Writes the existing kubeconfig file to disk
if test.existingKubeConfig != nil {
if err := createKubeConfigFileIfNotExists(tmpdir, "test.conf", test.existingKubeConfig); err != nil {
t.Errorf("createKubeConfigFileIfNotExists failed")
}
}
// Writes the KubeConfig file to disk
err := createKubeConfigFileIfNotExists(tmpdir, "test.conf", test.kubeConfig)
if test.expectedError && err == nil {
t.Errorf("createKubeConfigFileIfNotExists didn't failed when expected to fail")
}
if !test.expectedError && err != nil {
t.Errorf("createKubeConfigFileIfNotExists failed")
}
// Assert created file is there
testutil.AssertFileExists(t, tmpdir, "test.conf")
}
}
func TestCreateKubeconfigFilesAndWrappers(t *testing.T) {
var tests = []struct {
createKubeConfigFunction func(outDir string, cfg *kubeadmapi.MasterConfiguration) error
expectedFiles []string
expectedError bool
}{
{ // Test createKubeConfigFiles fails when an unknown kubeconfig is requested
createKubeConfigFunction: func(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
return createKubeConfigFiles(outDir, cfg, "unknown.conf")
},
expectedError: true,
},
{ // Test CreateInitKubeConfigFiles (wrapper to createKubeConfigFile)
createKubeConfigFunction: CreateInitKubeConfigFiles,
expectedFiles: []string{
kubeadmconstants.AdminKubeConfigFileName,
kubeadmconstants.KubeletKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
kubeadmconstants.SchedulerKubeConfigFileName,
},
},
{ // Test CreateAdminKubeConfigFile (wrapper to createKubeConfigFile)
createKubeConfigFunction: CreateAdminKubeConfigFile,
expectedFiles: []string{kubeadmconstants.AdminKubeConfigFileName},
},
{ // Test CreateKubeletKubeConfigFile (wrapper to createKubeConfigFile)
createKubeConfigFunction: CreateKubeletKubeConfigFile,
expectedFiles: []string{kubeadmconstants.KubeletKubeConfigFileName},
},
{ // Test CreateControllerManagerKubeConfigFile (wrapper to createKubeConfigFile)
createKubeConfigFunction: CreateControllerManagerKubeConfigFile,
expectedFiles: []string{kubeadmconstants.ControllerManagerKubeConfigFileName},
},
{ // Test CreateSchedulerKubeConfigFile (wrapper to createKubeConfigFile)
createKubeConfigFunction: CreateSchedulerKubeConfigFile,
expectedFiles: []string{kubeadmconstants.SchedulerKubeConfigFileName},
},
}
for _, test := range tests {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Adds a pki folder with a ca certs to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Creates a Master Configuration pointing to the pkidir folder
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
CertificatesDir: pkidir,
}
// Executes the createKubeConfigFunction
err := test.createKubeConfigFunction(tmpdir, cfg)
if test.expectedError && err == nil {
t.Errorf("createKubeConfigFunction didn't failed when expected to fail")
continue
}
if !test.expectedError && err != nil {
t.Errorf("createKubeConfigFunction failed")
continue
}
// Assert expected files are there
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
}
}
func TestWriteKubeConfigFailsIfCADoesntExists(t *testing.T) {
// Temporary folders for the test case (without a CA)
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Creates a Master Configuration pointing to the tmpdir folder
cfg := &kubeadmapi.MasterConfiguration{
CertificatesDir: tmpdir,
}
var tests = []struct {
writeKubeConfigFunction func(out io.Writer) error
}{
{ // Test WriteKubeConfigWithClientCert
writeKubeConfigFunction: func(out io.Writer) error {
return WriteKubeConfigWithClientCert(out, cfg, "myUser")
},
},
{ // Test WriteKubeConfigWithToken
writeKubeConfigFunction: func(out io.Writer) error {
return WriteKubeConfigWithToken(out, cfg, "myUser", "12345")
},
},
}
for _, test := range tests {
buf := new(bytes.Buffer)
// executes writeKubeConfigFunction
if err := test.writeKubeConfigFunction(buf); err == nil {
t.Error("writeKubeConfigFunction didnt failed when expected")
}
}
}
func TestWriteKubeConfig(t *testing.T) {
// Temporary folders for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// Adds a pki folder with a ca cert to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Retrieves the ca cert for assertions
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
t.Fatalf("couldn't retrive ca cert: %v", err)
}
// Creates a Master Configuration pointing to the pkidir folder
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
CertificatesDir: pkidir,
}
var tests = []struct {
writeKubeConfigFunction func(out io.Writer) error
withClientCert bool
withToken bool
}{
{ // Test WriteKubeConfigWithClientCert
writeKubeConfigFunction: func(out io.Writer) error {
return WriteKubeConfigWithClientCert(out, cfg, "myUser")
},
withClientCert: true,
},
{ // Test WriteKubeConfigWithToken
writeKubeConfigFunction: func(out io.Writer) error {
return WriteKubeConfigWithToken(out, cfg, "myUser", "12345")
},
withToken: true,
},
}
for _, test := range tests {
buf := new(bytes.Buffer)
// executes writeKubeConfigFunction
if err := test.writeKubeConfigFunction(buf); err != nil {
t.Error("writeKubeConfigFunction failed")
continue
}
// reads the kubeconfig written to the buffer
config, err := clientcmd.Load(buf.Bytes())
if err != nil {
t.Errorf("Couldn't read kubeconfig file from buffer: %v", err)
continue
}
// checks that CLI flags are properly propagated
kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
if test.withClientCert {
// checks that kubeconfig files have expected client cert
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithClientCert(t, config, caCert, "myUser")
}
if test.withToken {
// checks that kubeconfig files have expected token
kubeconfigtestutil.AssertKubeConfigCurrentAuthInfoWithToken(t, config, "myUser", "12345")
}
}
}
// setupdKubeConfigWithClientAuth is a test utility function that wraps buildKubeConfigFromSpec for building a KubeConfig object With ClientAuth
func setupdKubeConfigWithClientAuth(t *testing.T, caCert *x509.Certificate, caKey *rsa.PrivateKey, APIServer, clientName string, organizations ...string) *clientcmdapi.Config {
spec := &kubeConfigSpec{
CACert: caCert,
APIServer: APIServer,
ClientName: clientName,
ClientCertAuth: &clientCertAuth{
CAKey: caKey,
Organizations: organizations,
},
}
config, err := buildKubeConfigFromSpec(spec)
if err != nil {
t.Fatal("buildKubeConfigFromSpec failed!")
}
return config
}
// setupdKubeConfigWithTokenAuth is a test utility function that wraps buildKubeConfigFromSpec for building a KubeConfig object with Token
func setupdKubeConfigWithTokenAuth(t *testing.T, caCert *x509.Certificate, APIServer, clientName, token string) *clientcmdapi.Config {
spec := &kubeConfigSpec{
CACert: caCert,
APIServer: APIServer,
ClientName: clientName,
TokenAuth: &tokenAuth{
Token: token,
},
}
config, err := buildKubeConfigFromSpec(spec)
if err != nil {
t.Fatal("buildKubeConfigFromSpec failed!")
}
return config
}

View File

@ -0,0 +1,57 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["kubelet.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["kubelet_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,234 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1"
)
// CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature.
func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster\n",
kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)
_, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
if err != nil {
return err
}
kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1alpha1.SchemeGroupVersion, *kubeletCodecs)
if err != nil {
return err
}
if err = apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeletBaseConfigurationConfigMap,
Namespace: metav1.NamespaceSystem,
},
Data: map[string]string{
kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes),
},
}); err != nil {
return err
}
if err := createKubeletBaseConfigMapRBACRules(client); err != nil {
return fmt.Errorf("error creating base kubelet configmap RBAC rules: %v", err)
}
return updateNodeWithConfigMap(client, cfg.NodeName)
}
// ConsumeBaseKubeletConfiguration consumes base kubelet configuration for dynamic kubelet configuration feature.
func ConsumeBaseKubeletConfiguration(nodeName string) error {
client, err := getLocalNodeTLSBootstrappedClient()
if err != nil {
return err
}
kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.GetOptions{})
if err != nil {
return err
}
if err := writeInitKubeletConfigToDisk([]byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey])); err != nil {
return fmt.Errorf("failed to write initial remote configuration of kubelet to disk for node %s: %v", nodeName, err)
}
return updateNodeWithConfigMap(client, nodeName)
}
// updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap
func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error {
fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s\n",
nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)
// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.UpdateNodeTimeout, func() (bool, error) {
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, nil
}
oldData, err := json.Marshal(node)
if err != nil {
return false, err
}
kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.GetOptions{})
if err != nil {
return false, nil
}
node.Spec.ConfigSource = &v1.NodeConfigSource{
ConfigMapRef: &v1.ObjectReference{
Name: kubeadmconstants.KubeletBaseConfigurationConfigMap,
Namespace: metav1.NamespaceSystem,
UID: kubeletCfg.UID,
},
}
newData, err := json.Marshal(node)
if err != nil {
return false, err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return false, err
}
if _, err := client.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
if apierrs.IsConflict(err) {
fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)")
return false, nil
}
return false, err
}
return true, nil
})
}
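// Illustrative note (not part of kubeadm): the strategic merge patch computed above only carries
// the delta between the node before and after setting Spec.ConfigSource, so the PATCH request
// leaves unrelated Node fields untouched. Assuming a node with no previous config source, the
// generated patch looks roughly like this (field names follow the v1 JSON tags; the ConfigMap
// name and UID are placeholders):
//
//	{"spec":{"configSource":{"configMapRef":{"name":"<kubelet-base-configmap>","namespace":"kube-system","uid":"<configmap-uid>"}}}}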
// createKubeletBaseConfigMapRBACRules creates the RBAC rules that let the nodes group and the node bootstrap token group read the base kubelet ConfigMap in the kube-system namespace
func createKubeletBaseConfigMapRBACRules(client clientset.Interface) error {
if err := apiclient.CreateOrUpdateRole(client, &rbac.Role{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeletBaseConfigMapRoleName,
Namespace: metav1.NamespaceSystem,
},
Rules: []rbac.PolicyRule{
rbachelper.NewRule("get").Groups("").Resources("configmaps").Names(kubeadmconstants.KubeletBaseConfigurationConfigMap).RuleOrDie(),
},
}); err != nil {
return err
}
return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeletBaseConfigMapRoleName,
Namespace: metav1.NamespaceSystem,
},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: "Role",
Name: kubeadmconstants.KubeletBaseConfigMapRoleName,
},
Subjects: []rbac.Subject{
{
Kind: rbac.GroupKind,
Name: kubeadmconstants.NodesGroup,
},
{
Kind: rbac.GroupKind,
Name: kubeadmconstants.NodeBootstrapTokenAuthGroup,
},
},
})
}
// getLocalNodeTLSBootstrappedClient waits for the kubelet to perform the TLS bootstrap
// and then creates a client from config file /etc/kubernetes/kubelet.conf
func getLocalNodeTLSBootstrappedClient() (clientset.Interface, error) {
fmt.Println("[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...")
kubeletKubeConfig := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName)
// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
err := wait.PollImmediateInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) {
_, err := os.Stat(kubeletKubeConfig)
return (err == nil), nil
})
if err != nil {
return nil, err
}
return kubeconfigutil.ClientSetFromFile(kubeletKubeConfig)
}
// WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master.
func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error {
fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s\n", cfg.NodeName)
_, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
if err != nil {
return err
}
kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1alpha1.SchemeGroupVersion, *kubeletCodecs)
if err != nil {
return err
}
if err := writeInitKubeletConfigToDisk(kubeletBytes); err != nil {
return fmt.Errorf("failed to write base configuration of kubelet to disk on master node %s: %v", cfg.NodeName, err)
}
return nil
}
func writeInitKubeletConfigToDisk(kubeletConfig []byte) error {
// the directory needs the execute bit (0700) so the config file can be created inside it
if err := os.MkdirAll(kubeadmconstants.KubeletBaseConfigurationDir, 0700); err != nil {
return fmt.Errorf("failed to create directory %q: %v", kubeadmconstants.KubeletBaseConfigurationDir, err)
}
baseConfigFile := filepath.Join(kubeadmconstants.KubeletBaseConfigurationDir, kubeadmconstants.KubeletBaseConfigurationFile)
if err := ioutil.WriteFile(baseConfigFile, kubeletConfig, 0644); err != nil {
return fmt.Errorf("failed to write initial remote configuration of kubelet into file %q: %v", baseConfigFile, err)
}
return nil
}
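// Illustrative sketch only (not part of kubeadm): on a master node, the dynamic kubelet
// configuration helpers above could be combined roughly as follows, assuming a populated
// MasterConfiguration and a working clientset.
//
//	// Write the base configuration locally so the master's own kubelet can consume it...
//	if err := WriteInitKubeletConfigToDiskOnMaster(cfg); err != nil {
//		return err
//	}
//	// ...then publish it as a ConfigMap, create the RBAC rules that let nodes read it,
//	// and point this node's Spec.ConfigSource at the ConfigMap.
//	if err := CreateBaseKubeletConfiguration(cfg, client); err != nil {
//		return err
//	}
//	// A joining node would instead call ConsumeBaseKubeletConfiguration(nodeName) to
//	// fetch the ConfigMap via its TLS-bootstrapped client and write it to disk.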

View File

@ -0,0 +1,134 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1"
)
func TestCreateBaseKubeletConfiguration(t *testing.T) {
nodeName := "fake-node"
client := fake.NewSimpleClientset()
cfg := &kubeadmapi.MasterConfiguration{
NodeName: nodeName,
KubeletConfiguration: kubeadmapi.KubeletConfiguration{
BaseConfig: &kubeletconfigv1alpha1.KubeletConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeletConfiguration",
},
},
},
}
client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Spec: v1.NodeSpec{
ConfigSource: &v1.NodeConfigSource{
ConfigMapRef: &v1.ObjectReference{
UID: "",
},
},
},
}, nil
})
client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeletBaseConfigurationConfigMap,
Namespace: metav1.NamespaceSystem,
UID: "fake-uid",
},
}, nil
})
client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
if err := CreateBaseKubeletConfiguration(cfg, client); err != nil {
t.Errorf("CreateBaseKubeletConfiguration: unexepected error %v", err)
}
}
func TestUpdateNodeWithConfigMap(t *testing.T) {
nodeName := "fake-node"
client := fake.NewSimpleClientset()
client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Spec: v1.NodeSpec{
ConfigSource: &v1.NodeConfigSource{
ConfigMapRef: &v1.ObjectReference{
UID: "",
},
},
},
}, nil
})
client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeletBaseConfigurationConfigMap,
Namespace: metav1.NamespaceSystem,
UID: "fake-uid",
},
}, nil
})
client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
if err := updateNodeWithConfigMap(client, nodeName); err != nil {
t.Errorf("UpdateNodeWithConfigMap: unexepected error %v", err)
}
}
func TestCreateKubeletBaseConfigMapRBACRules(t *testing.T) {
client := fake.NewSimpleClientset()
client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
if err := createKubeletBaseConfigMapRBACRules(client); err != nil {
t.Errorf("createKubeletBaseConfigMapRBACRules: unexepected error %v", err)
}
}

View File

@ -0,0 +1,53 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["markmaster_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["markmaster.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster",
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,98 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package markmaster
import (
"encoding/json"
"fmt"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
// MarkMaster taints the master and sets the master label
func MarkMaster(client clientset.Interface, masterName string) error {
fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.MarkMasterTimeout, func() (bool, error) {
// First get the node object
n, err := client.CoreV1().Nodes().Get(masterName, metav1.GetOptions{})
if err != nil {
return false, nil
}
// The node may appear to have no labels at first,
// so we wait for it to get the hostname label.
if _, found := n.ObjectMeta.Labels[kubeletapis.LabelHostname]; !found {
return false, nil
}
oldData, err := json.Marshal(n)
if err != nil {
return false, err
}
// The master node should be tainted and labelled accordingly
markMasterNode(n)
newData, err := json.Marshal(n)
if err != nil {
return false, err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return false, err
}
if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
if apierrs.IsConflict(err) {
fmt.Println("[markmaster] Temporarily unable to update master node metadata due to conflict (will retry)")
return false, nil
}
return false, err
}
fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
return true, nil
})
}
func markMasterNode(n *v1.Node) {
n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
}
func addTaintIfNotExists(n *v1.Node, t v1.Taint) {
for _, taint := range n.Spec.Taints {
if taint == t {
return
}
}
n.Spec.Taints = append(n.Spec.Taints, t)
}
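// Illustrative sketch only (not part of kubeadm): MarkMaster is typically called once the API
// server answers, with a clientset built from the admin kubeconfig; the file path and node name
// below are assumptions made up for the example.
//
//	client, err := kubeconfigutil.ClientSetFromFile("/etc/kubernetes/admin.conf")
//	if err != nil {
//		return err
//	}
//	// Adds the node-role.kubernetes.io/master label and the NoSchedule master taint,
//	// retrying until the node has its hostname label and the patch is accepted.
//	return MarkMaster(client, "master-0")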

View File

@ -0,0 +1,143 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package markmaster
import (
"bytes"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/node"
)
func TestMarkMaster(t *testing.T) {
// Note: this test takes advantage of the deterministic marshalling of
// JSON provided by strategicpatch so that "expectedPatch" can use a
// string equality test instead of a logical JSON equality test. That
// will need to change if strategicpatch's behavior changes in the
// future.
tests := []struct {
name string
existingLabel string
existingTaint *v1.Taint
expectedPatch string
}{
{
"master label and taint missing",
"",
nil,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
},
{
"master label missing",
"",
&kubeadmconstants.MasterTaint,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
},
{
"master taint missing",
kubeadmconstants.LabelNodeRoleMaster,
nil,
"{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
},
{
"nothing missing",
kubeadmconstants.LabelNodeRoleMaster,
&kubeadmconstants.MasterTaint,
"{}",
},
}
for _, tc := range tests {
hostname := node.GetHostname("")
masterNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: hostname,
Labels: map[string]string{
kubeletapis.LabelHostname: hostname,
},
},
}
if tc.existingLabel != "" {
masterNode.ObjectMeta.Labels[tc.existingLabel] = ""
}
if tc.existingTaint != nil {
masterNode.Spec.Taints = append(masterNode.Spec.Taints, *tc.existingTaint)
}
jsonNode, err := json.Marshal(masterNode)
if err != nil {
t.Fatalf("MarkMaster(%s): unexpected encoding error: %v", tc.name, err)
}
var patchRequest string
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
if req.URL.Path != "/api/v1/nodes/"+hostname {
t.Errorf("MarkMaster(%s): request for unexpected HTTP resource: %v", tc.name, req.URL.Path)
w.WriteHeader(http.StatusNotFound)
return
}
switch req.Method {
case "GET":
case "PATCH":
patchRequest = toString(req.Body)
default:
t.Errorf("MarkMaster(%s): request for unexpected HTTP verb: %v", tc.name, req.Method)
w.WriteHeader(http.StatusNotFound)
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonNode)
}))
defer s.Close()
cs, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
if err != nil {
t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err)
}
err = MarkMaster(cs, hostname)
if err != nil {
t.Errorf("MarkMaster(%s) returned unexpected error: %v", tc.name, err)
}
if tc.expectedPatch != patchRequest {
t.Errorf("MarkMaster(%s) wanted patch %v, got %v", tc.name, tc.expectedPatch, patchRequest)
}
}
}
func toString(r io.Reader) string {
buf := new(bytes.Buffer)
buf.ReadFrom(r)
return buf.String()
}

View File

@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"podspec_mutation_test.go",
"selfhosting_test.go",
"selfhosting_volumes_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"podspec_mutation.go",
"selfhosting.go",
"selfhosting_volumes.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,169 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selfhosting
import (
"path/filepath"
"k8s.io/api/core/v1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
const (
// selfHostedKubeConfigDir sets the directory where kubeconfig files for the scheduler and controller-manager should be mounted
// Due to how the projected volume mount works (can only be a full directory, not mount individual files), we must change this from
// the default as mounts cannot be nested (/etc/kubernetes would override /etc/kubernetes/pki)
selfHostedKubeConfigDir = "/etc/kubernetes/kubeconfig"
)
// PodSpecMutatorFunc is a function capable of mutating a PodSpec
type PodSpecMutatorFunc func(*v1.PodSpec)
// GetDefaultMutators gets the mutator functions that should always be used
func GetDefaultMutators() map[string][]PodSpecMutatorFunc {
return map[string][]PodSpecMutatorFunc{
kubeadmconstants.KubeAPIServer: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
},
kubeadmconstants.KubeControllerManager: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
},
kubeadmconstants.KubeScheduler: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
},
}
}
// GetMutatorsFromFeatureGates returns all mutators needed based on the feature gates passed
func GetMutatorsFromFeatureGates(featureGates map[string]bool) map[string][]PodSpecMutatorFunc {
// Here the map of different mutators to use for the control plane's podspec is stored
mutators := GetDefaultMutators()
// Some extra work to be done if we should store the control plane certificates in Secrets
if features.Enabled(featureGates, features.StoreCertsInSecrets) {
// Add the store-certs-in-secrets-specific mutators here so that the self-hosted component starts using them
mutators[kubeadmconstants.KubeAPIServer] = append(mutators[kubeadmconstants.KubeAPIServer], setSelfHostedVolumesForAPIServer)
mutators[kubeadmconstants.KubeControllerManager] = append(mutators[kubeadmconstants.KubeControllerManager], setSelfHostedVolumesForControllerManager)
mutators[kubeadmconstants.KubeScheduler] = append(mutators[kubeadmconstants.KubeScheduler], setSelfHostedVolumesForScheduler)
}
return mutators
}
// mutatePodSpec makes a Static Pod-hosted PodSpec suitable for self-hosting
func mutatePodSpec(mutators map[string][]PodSpecMutatorFunc, name string, podSpec *v1.PodSpec) {
// Get the mutator functions for the component in question, then loop through and execute them
mutatorsForComponent := mutators[name]
for _, mutateFunc := range mutatorsForComponent {
mutateFunc(podSpec)
}
}
// addNodeSelectorToPodSpec makes the Pod require scheduling on a node marked with the master label
func addNodeSelectorToPodSpec(podSpec *v1.PodSpec) {
if podSpec.NodeSelector == nil {
podSpec.NodeSelector = map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""}
return
}
podSpec.NodeSelector[kubeadmconstants.LabelNodeRoleMaster] = ""
}
// setMasterTolerationOnPodSpec makes the Pod tolerate the master taint
func setMasterTolerationOnPodSpec(podSpec *v1.PodSpec) {
if podSpec.Tolerations == nil {
podSpec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration}
return
}
podSpec.Tolerations = append(podSpec.Tolerations, kubeadmconstants.MasterToleration)
}
// setRightDNSPolicyOnPodSpec makes sure the self-hosted components can look up things via kube-dns if necessary
func setRightDNSPolicyOnPodSpec(podSpec *v1.PodSpec) {
podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet
}
// setSelfHostedVolumesForAPIServer makes sure the self-hosted api server has the right volume source coming from a self-hosted cluster
func setSelfHostedVolumesForAPIServer(podSpec *v1.PodSpec) {
for i, v := range podSpec.Volumes {
// If the volume name matches the expected one; switch the volume source from hostPath to cluster-hosted
if v.Name == kubeadmconstants.KubeCertificatesVolumeName {
podSpec.Volumes[i].VolumeSource = apiServerCertificatesVolumeSource()
}
}
}
// setSelfHostedVolumesForControllerManager makes sure the self-hosted controller manager has the right volume source coming from a self-hosted cluster
func setSelfHostedVolumesForControllerManager(podSpec *v1.PodSpec) {
for i, v := range podSpec.Volumes {
// If the volume name matches the expected one; switch the volume source from hostPath to cluster-hosted
if v.Name == kubeadmconstants.KubeCertificatesVolumeName {
podSpec.Volumes[i].VolumeSource = controllerManagerCertificatesVolumeSource()
} else if v.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Volumes[i].VolumeSource = kubeConfigVolumeSource(kubeadmconstants.ControllerManagerKubeConfigFileName)
}
}
// Change the mount path for the kubeconfig directory to selfHostedKubeConfigDir
for i, vm := range podSpec.Containers[0].VolumeMounts {
if vm.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Containers[0].VolumeMounts[i].MountPath = selfHostedKubeConfigDir
}
}
// Rewrite the --kubeconfig path as the volume mount path may not overlap with certs dir, which it does by default (/etc/kubernetes and /etc/kubernetes/pki)
// This is not a problem with hostPath mounts as hostPath supports mounting one file only, instead of always a full directory. Secrets and Projected Volumes
// don't support that.
podSpec.Containers[0].Command = kubeadmutil.ReplaceArgument(podSpec.Containers[0].Command, func(argMap map[string]string) map[string]string {
argMap["kubeconfig"] = filepath.Join(selfHostedKubeConfigDir, kubeadmconstants.ControllerManagerKubeConfigFileName)
return argMap
})
}
// setSelfHostedVolumesForScheduler makes sure the self-hosted scheduler has the right volume source coming from a self-hosted cluster
func setSelfHostedVolumesForScheduler(podSpec *v1.PodSpec) {
for i, v := range podSpec.Volumes {
// If the volume name matches the expected one; switch the volume source from hostPath to cluster-hosted
if v.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Volumes[i].VolumeSource = kubeConfigVolumeSource(kubeadmconstants.SchedulerKubeConfigFileName)
}
}
// Change the mount path for the kubeconfig directory to selfHostedKubeConfigDir
for i, vm := range podSpec.Containers[0].VolumeMounts {
if vm.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Containers[0].VolumeMounts[i].MountPath = selfHostedKubeConfigDir
}
}
// Rewrite the --kubeconfig path as the volume mount path may not overlap with certs dir, which it does by default (/etc/kubernetes and /etc/kubernetes/pki)
// This is not a problem with hostPath mounts as hostPath supports mounting one file only, instead of always a full directory. Secrets and Projected Volumes
// don't support that.
podSpec.Containers[0].Command = kubeadmutil.ReplaceArgument(podSpec.Containers[0].Command, func(argMap map[string]string) map[string]string {
argMap["kubeconfig"] = filepath.Join(selfHostedKubeConfigDir, kubeadmconstants.SchedulerKubeConfigFileName)
return argMap
})
}
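// Illustrative sketch only (not part of kubeadm): given the static-Pod PodSpec of the scheduler,
// the mutators selected from the feature gates can be applied like this; the featureGates map is
// an assumption made up for the example.
//
//	featureGates := map[string]bool{features.StoreCertsInSecrets: true}
//	mutators := GetMutatorsFromFeatureGates(featureGates)
//	// Adds the master node selector, toleration and DNS policy and, because
//	// StoreCertsInSecrets is enabled, swaps the kubeconfig hostPath volume for a
//	// cluster-hosted source and rewrites the --kubeconfig flag to the new mount path.
//	mutatePodSpec(mutators, kubeadmconstants.KubeScheduler, &staticPodSpec)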

View File

@ -0,0 +1,468 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selfhosting
import (
"reflect"
"sort"
"testing"
"k8s.io/api/core/v1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
func TestMutatePodSpec(t *testing.T) {
var tests = []struct {
component string
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
component: kubeadmconstants.KubeAPIServer,
podSpec: &v1.PodSpec{},
expected: v1.PodSpec{
NodeSelector: map[string]string{
kubeadmconstants.LabelNodeRoleMaster: "",
},
Tolerations: []v1.Toleration{
kubeadmconstants.MasterToleration,
},
DNSPolicy: v1.DNSClusterFirstWithHostNet,
},
},
{
component: kubeadmconstants.KubeControllerManager,
podSpec: &v1.PodSpec{},
expected: v1.PodSpec{
NodeSelector: map[string]string{
kubeadmconstants.LabelNodeRoleMaster: "",
},
Tolerations: []v1.Toleration{
kubeadmconstants.MasterToleration,
},
DNSPolicy: v1.DNSClusterFirstWithHostNet,
},
},
{
component: kubeadmconstants.KubeScheduler,
podSpec: &v1.PodSpec{},
expected: v1.PodSpec{
NodeSelector: map[string]string{
kubeadmconstants.LabelNodeRoleMaster: "",
},
Tolerations: []v1.Toleration{
kubeadmconstants.MasterToleration,
},
DNSPolicy: v1.DNSClusterFirstWithHostNet,
},
},
}
for _, rt := range tests {
mutatePodSpec(GetDefaultMutators(), rt.component, rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed mutatePodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestAddNodeSelectorToPodSpec(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{},
expected: v1.PodSpec{
NodeSelector: map[string]string{
kubeadmconstants.LabelNodeRoleMaster: "",
},
},
},
{
podSpec: &v1.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
expected: v1.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
kubeadmconstants.LabelNodeRoleMaster: "",
},
},
},
}
for _, rt := range tests {
addNodeSelectorToPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed addNodeSelectorToPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetMasterTolerationOnPodSpec(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{},
expected: v1.PodSpec{
Tolerations: []v1.Toleration{
kubeadmconstants.MasterToleration,
},
},
},
{
podSpec: &v1.PodSpec{
Tolerations: []v1.Toleration{
{Key: "foo", Value: "bar"},
},
},
expected: v1.PodSpec{
Tolerations: []v1.Toleration{
{Key: "foo", Value: "bar"},
kubeadmconstants.MasterToleration,
},
},
},
}
for _, rt := range tests {
setMasterTolerationOnPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setMasterTolerationOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetRightDNSPolicyOnPodSpec(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{},
expected: v1.PodSpec{
DNSPolicy: v1.DNSClusterFirstWithHostNet,
},
},
{
podSpec: &v1.PodSpec{
DNSPolicy: v1.DNSClusterFirst,
},
expected: v1.PodSpec{
DNSPolicy: v1.DNSClusterFirstWithHostNet,
},
},
}
for _, rt := range tests {
setRightDNSPolicyOnPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setRightDNSPolicyOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForAPIServer(t *testing.T) {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
},
Command: []string{
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/pki",
Type: &hostPathDirectoryOrCreate,
},
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
},
Command: []string{
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "k8s-certs",
VolumeSource: apiServerCertificatesVolumeSource(),
},
},
},
},
}
for _, rt := range tests {
setSelfHostedVolumesForAPIServer(rt.podSpec)
sort.Strings(rt.podSpec.Containers[0].Command)
sort.Strings(rt.expected.Containers[0].Command)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setSelfHostedVolumesForAPIServer:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForControllerManager(t *testing.T) {
hostPathFileOrCreate := v1.HostPathFileOrCreate
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/controller-manager.conf",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/controller-manager.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/pki",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/controller-manager.conf",
Type: &hostPathFileOrCreate,
},
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/kubeconfig",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/kubeconfig/controller-manager.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
Type: &hostPathDirectoryOrCreate,
},
},
},
{
Name: "k8s-certs",
VolumeSource: controllerManagerCertificatesVolumeSource(),
},
{
Name: "kubeconfig",
VolumeSource: kubeConfigVolumeSource(kubeadmconstants.ControllerManagerKubeConfigFileName),
},
},
},
},
}
for _, rt := range tests {
setSelfHostedVolumesForControllerManager(rt.podSpec)
sort.Strings(rt.podSpec.Containers[0].Command)
sort.Strings(rt.expected.Containers[0].Command)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setSelfHostedVolumesForControllerManager:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForScheduler(t *testing.T) {
hostPathFileOrCreate := v1.HostPathFileOrCreate
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/scheduler.conf",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/scheduler.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/scheduler.conf",
Type: &hostPathFileOrCreate,
},
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/kubeconfig",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/kubeconfig/scheduler.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "kubeconfig",
VolumeSource: kubeConfigVolumeSource(kubeadmconstants.SchedulerKubeConfigFileName),
},
},
},
},
}
for _, rt := range tests {
setSelfHostedVolumesForScheduler(rt.podSpec)
sort.Strings(rt.podSpec.Containers[0].Command)
sort.Strings(rt.expected.Containers[0].Command)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setSelfHostedVolumesForScheduler:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}

View File

@ -0,0 +1,173 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selfhosting
import (
"fmt"
"os"
"time"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
// selfHostingWaitTimeout describes the maximum amount of time a self-hosting wait process should wait before timing out
selfHostingWaitTimeout = 2 * time.Minute
// selfHostingFailureThreshold describes how many times kubeadm will retry creating the DaemonSets
selfHostingFailureThreshold int = 5
)
// CreateSelfHostedControlPlane is responsible for turning a Static Pod-hosted control plane to a self-hosted one
// It achieves that task this way:
// 1. Load the Static Pod specification from disk (from /etc/kubernetes/manifests)
// 2. Extract the PodSpec from that Static Pod specification
// 3. Mutate the PodSpec to be compatible with self-hosting (add the right labels, taints, etc. so it can schedule correctly)
// 4. Build a new DaemonSet object for the self-hosted component in question. Use the above mentioned PodSpec
// 5. Create the DaemonSet resource. Wait until the Pods are running.
// 6. Remove the Static Pod manifest file. The kubelet will stop the original Static Pod-hosted component that was running.
// 7. The self-hosted containers should now step up and take over.
// 8. In order to avoid race conditions, we have to make sure that the static pod is deleted correctly before we continue
// Otherwise, there is a race condition if we proceed before the kubelet has restarted the API server correctly, and the next .Create call flakes
// 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter, dryRun bool) error {
// Adjust the timeout slightly to something self-hosting specific
waiter.SetTimeout(selfHostingWaitTimeout)
// Here the map of different mutators to use for the control plane's PodSpec is stored
mutators := GetMutatorsFromFeatureGates(cfg.FeatureGates)
// Some extra work to be done if we should store the control plane certificates in Secrets
if features.Enabled(cfg.FeatureGates, features.StoreCertsInSecrets) {
// Upload the certificates and kubeconfig files from disk to the cluster as Secrets
if err := uploadTLSSecrets(client, cfg.CertificatesDir); err != nil {
return err
}
if err := uploadKubeConfigSecrets(client, kubeConfigDir); err != nil {
return err
}
}
for _, componentName := range kubeadmconstants.MasterComponents {
start := time.Now()
manifestPath := kubeadmconstants.GetStaticPodFilepath(componentName, manifestsDir)
// Since we want this function to be idempotent, just continue and try the next component if this file doesn't exist
if _, err := os.Stat(manifestPath); err != nil {
fmt.Printf("[self-hosted] The Static Pod for the component %q doesn't seem to be on the disk; trying the next one\n", componentName)
continue
}
// Load the Static Pod file in order to be able to create a self-hosted variant of that file
pod, err := volumeutil.LoadPodFromFile(manifestPath)
if err != nil {
return err
}
podSpec := &pod.Spec
// Build a DaemonSet object from the loaded PodSpec
ds := BuildDaemonSet(componentName, podSpec, mutators)
// Create or update the DaemonSet in the API Server, and retry selfHostingFailureThreshold times if it errors out
if err := apiclient.TryRunCommand(func() error {
return apiclient.CreateOrUpdateDaemonSet(client, ds)
}, selfHostingFailureThreshold); err != nil {
return err
}
// Wait for the self-hosted component to come up
if err := waiter.WaitForPodsWithLabel(BuildSelfHostedComponentLabelQuery(componentName)); err != nil {
return err
}
// Remove the old Static Pod manifest if we're not dry-running
if !dryRun {
if err := os.RemoveAll(manifestPath); err != nil {
return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err)
}
}
// Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// remove the Static Pod (or the mirror Pod respectively). This implicitly also verifies that the API server endpoint is healthy,
// because this blocks until the API server returns a 404 Not Found when getting the Static Pod
staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeName)
if err := waiter.WaitForPodToDisappear(staticPodName); err != nil {
return err
}
// Just as an extra safety check, make sure the API server is returning OK at the /healthz endpoint (although we know it could return a GET answer for a Pod above)
if err := waiter.WaitForAPI(); err != nil {
return err
}
fmt.Printf("[self-hosted] self-hosted %s ready after %f seconds\n", componentName, time.Since(start).Seconds())
}
return nil
}
// BuildDaemonSet is responsible for mutating the PodSpec and returning a DaemonSet that is suitable for self-hosting
func BuildDaemonSet(name string, podSpec *v1.PodSpec, mutators map[string][]PodSpecMutatorFunc) *apps.DaemonSet {
// Mutate the PodSpec so it's suitable for self-hosting
mutatePodSpec(mutators, name, podSpec)
// Return a DaemonSet based on that Spec
return &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.AddSelfHostedPrefix(name),
Namespace: metav1.NamespaceSystem,
Labels: BuildSelfhostedComponentLabels(name),
},
Spec: apps.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: BuildSelfhostedComponentLabels(name),
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: BuildSelfhostedComponentLabels(name),
},
Spec: *podSpec,
},
UpdateStrategy: apps.DaemonSetUpdateStrategy{
// Make the DaemonSet utilize the RollingUpdate rollout strategy
Type: apps.RollingUpdateDaemonSetStrategyType,
},
},
}
}
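// exampleSelfHostedAPIServerDaemonSet is an illustrative sketch only (it is not called anywhere by
// kubeadm): it shows how BuildDaemonSet is typically driven with the default mutators, mirroring steps
// 2-4 of the flow described above CreateSelfHostedControlPlane. The minimal PodSpec literal is a
// hypothetical stand-in for a real Static Pod spec loaded with volumeutil.LoadPodFromFile.
func exampleSelfHostedAPIServerDaemonSet() *apps.DaemonSet {
	// A trimmed-down API server PodSpec; in practice this comes from /etc/kubernetes/manifests.
	podSpec := &v1.PodSpec{
		HostNetwork: true,
		Containers: []v1.Container{
			{Name: kubeadmconstants.KubeAPIServer, Image: "gcr.io/google_containers/kube-apiserver-amd64:v1.7.4"},
		},
	}
	// Apply the default PodSpec mutators (node selector, master toleration, DNS policy) and wrap the
	// mutated spec in a DaemonSet named with the "self-hosted-" prefix.
	return BuildDaemonSet(kubeadmconstants.KubeAPIServer, podSpec, GetDefaultMutators())
}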
// BuildSelfhostedComponentLabels returns the labels for a self-hosted component
func BuildSelfhostedComponentLabels(component string) map[string]string {
return map[string]string{
"k8s-app": kubeadmconstants.AddSelfHostedPrefix(component),
}
}
// BuildSelfHostedComponentLabelQuery creates the right query for matching a self-hosted Pod
func BuildSelfHostedComponentLabelQuery(componentName string) string {
return fmt.Sprintf("k8s-app=%s", kubeadmconstants.AddSelfHostedPrefix(componentName))
}

View File

@ -0,0 +1,587 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selfhosting
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
testAPIServerPod = `
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- kube-apiserver
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --secure-port=6443
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --requestheader-group-headers=X-Remote-Group
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --advertise-address=192.168.1.115
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --insecure-port=0
- --experimental-bootstrap-token-auth=true
- --requestheader-username-headers=X-Remote-User
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-allowed-names=front-proxy-client
- --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
- --allow-privileged=true
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --authorization-mode=Node,RBAC
- --etcd-servers=http://127.0.0.1:2379
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /healthz
port: 6443
scheme: HTTPS
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-apiserver
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/pki
name: ca-certs-etc-pki
readOnly: true
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: ca-certs
- hostPath:
path: /etc/pki
name: ca-certs-etc-pki
status: {}
`
testAPIServerDaemonSet = `apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
k8s-app: self-hosted-kube-apiserver
name: self-hosted-kube-apiserver
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: self-hosted-kube-apiserver
template:
metadata:
creationTimestamp: null
labels:
k8s-app: self-hosted-kube-apiserver
spec:
containers:
- command:
- kube-apiserver
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --secure-port=6443
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --requestheader-group-headers=X-Remote-Group
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --advertise-address=192.168.1.115
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --insecure-port=0
- --experimental-bootstrap-token-auth=true
- --requestheader-username-headers=X-Remote-User
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-allowed-names=front-proxy-client
- --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
- --allow-privileged=true
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --authorization-mode=Node,RBAC
- --etcd-servers=http://127.0.0.1:2379
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /healthz
port: 6443
scheme: HTTPS
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-apiserver
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/pki
name: ca-certs-etc-pki
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: ca-certs
- hostPath:
path: /etc/pki
name: ca-certs-etc-pki
updateStrategy:
type: RollingUpdate
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0
`
testControllerManagerPod = `
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
name: kube-controller-manager
namespace: kube-system
spec:
containers:
- command:
- kube-controller-manager
- --leader-elect=true
- --controllers=*,bootstrapsigner,tokencleaner
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --service-account-private-key-file=/etc/kubernetes/pki/sa.key
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --address=127.0.0.1
- --use-service-account-credentials=true
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /healthz
port: 10252
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-controller-manager
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/kubernetes/controller-manager.conf
name: kubeconfig
readOnly: true
- mountPath: /etc/pki
name: ca-certs-etc-pki
readOnly: true
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: ca-certs
- hostPath:
path: /etc/kubernetes/controller-manager.conf
type: FileOrCreate
name: kubeconfig
- hostPath:
path: /etc/pki
name: ca-certs-etc-pki
status: {}
`
testControllerManagerDaemonSet = `apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
k8s-app: self-hosted-kube-controller-manager
name: self-hosted-kube-controller-manager
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: self-hosted-kube-controller-manager
template:
metadata:
creationTimestamp: null
labels:
k8s-app: self-hosted-kube-controller-manager
spec:
containers:
- command:
- kube-controller-manager
- --leader-elect=true
- --controllers=*,bootstrapsigner,tokencleaner
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --service-account-private-key-file=/etc/kubernetes/pki/sa.key
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --address=127.0.0.1
- --use-service-account-credentials=true
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /healthz
port: 10252
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-controller-manager
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/kubernetes/controller-manager.conf
name: kubeconfig
readOnly: true
- mountPath: /etc/pki
name: ca-certs-etc-pki
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: ca-certs
- hostPath:
path: /etc/kubernetes/controller-manager.conf
type: FileOrCreate
name: kubeconfig
- hostPath:
path: /etc/pki
name: ca-certs-etc-pki
updateStrategy:
type: RollingUpdate
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0
`
testSchedulerPod = `
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
name: kube-scheduler
namespace: kube-system
spec:
containers:
- command:
- kube-scheduler
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/scheduler.conf
- --address=127.0.0.1
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /healthz
port: 10251
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-scheduler
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /etc/kubernetes/scheduler.conf
name: kubeconfig
readOnly: true
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/scheduler.conf
type: FileOrCreate
name: kubeconfig
status: {}
`
testSchedulerDaemonSet = `apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
k8s-app: self-hosted-kube-scheduler
name: self-hosted-kube-scheduler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: self-hosted-kube-scheduler
template:
metadata:
creationTimestamp: null
labels:
k8s-app: self-hosted-kube-scheduler
spec:
containers:
- command:
- kube-scheduler
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/scheduler.conf
- --address=127.0.0.1
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /healthz
port: 10251
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-scheduler
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /etc/kubernetes/scheduler.conf
name: kubeconfig
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- hostPath:
path: /etc/kubernetes/scheduler.conf
type: FileOrCreate
name: kubeconfig
updateStrategy:
type: RollingUpdate
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0
`
)
func TestBuildDaemonSet(t *testing.T) {
var tests = []struct {
component string
podBytes []byte
dsBytes []byte
}{
{
component: constants.KubeAPIServer,
podBytes: []byte(testAPIServerPod),
dsBytes: []byte(testAPIServerDaemonSet),
},
{
component: constants.KubeControllerManager,
podBytes: []byte(testControllerManagerPod),
dsBytes: []byte(testControllerManagerDaemonSet),
},
{
component: constants.KubeScheduler,
podBytes: []byte(testSchedulerPod),
dsBytes: []byte(testSchedulerDaemonSet),
},
}
for _, rt := range tests {
tempFile, err := createTempFileWithContent(rt.podBytes)
if err != nil {
t.Errorf("error creating tempfile with content:%v", err)
}
defer os.Remove(tempFile)
pod, err := volumeutil.LoadPodFromFile(tempFile)
if err != nil {
t.Fatalf("couldn't load the specified Pod")
}
podSpec := &pod.Spec
ds := BuildDaemonSet(rt.component, podSpec, GetDefaultMutators())
dsBytes, err := util.MarshalToYaml(ds, apps.SchemeGroupVersion)
if err != nil {
t.Fatalf("failed to marshal daemonset to YAML: %v", err)
}
if !bytes.Equal(dsBytes, rt.dsBytes) {
t.Errorf("failed TestBuildDaemonSet:\nexpected:\n%s\nsaw:\n%s", rt.dsBytes, dsBytes)
}
}
}
func TestLoadPodSpecFromFile(t *testing.T) {
tests := []struct {
content string
expectError bool
}{
{
// Good YAML
content: `
apiVersion: v1
kind: Pod
metadata:
name: testpod
spec:
containers:
- image: gcr.io/google_containers/busybox
`,
expectError: false,
},
{
// Good JSON
content: `
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "testpod"
},
"spec": {
"containers": [
{
"image": "gcr.io/google_containers/busybox"
}
]
}
}`,
expectError: false,
},
{
// Bad PodSpec
content: `
apiVersion: v1
kind: Pod
metadata:
name: testpod
spec:
- image: gcr.io/google_containers/busybox
`,
expectError: true,
},
}
for _, rt := range tests {
tempFile, err := createTempFileWithContent([]byte(rt.content))
if err != nil {
t.Errorf("error creating tempfile with content:%v", err)
}
defer os.Remove(tempFile)
_, err = volumeutil.LoadPodFromFile(tempFile)
if (err != nil) != rt.expectError {
t.Errorf("failed TestLoadPodSpecFromFile:\nexpected error:\n%t\nsaw:\n%v", rt.expectError, err)
}
}
}
func createTempFileWithContent(content []byte) (string, error) {
tempFile, err := ioutil.TempFile("", "")
if err != nil {
return "", fmt.Errorf("cannot create temporary file: %v", err)
}
if _, err = tempFile.Write(content); err != nil {
return "", fmt.Errorf("cannot save temporary file: %v", err)
}
if err = tempFile.Close(); err != nil {
return "", fmt.Errorf("cannot close temporary file: %v", err)
}
return tempFile.Name(), nil
}

View File

@ -0,0 +1,298 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selfhosting
import (
"fmt"
"io/ioutil"
"path/filepath"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
type tlsKeyPair struct {
name string
cert string
key string
}
func apiServerCertificatesVolumeSource() v1.VolumeSource {
return v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.CACertAndKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.CACertName,
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.APIServerCertAndKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.APIServerCertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: kubeadmconstants.APIServerKeyName,
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.APIServerKubeletClientCertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: kubeadmconstants.APIServerKubeletClientKeyName,
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.ServiceAccountKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.ServiceAccountPublicKeyName,
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.FrontProxyCACertAndKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.FrontProxyCACertName,
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.FrontProxyClientCertAndKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.FrontProxyClientCertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: kubeadmconstants.FrontProxyClientKeyName,
},
},
},
},
},
},
}
}
func controllerManagerCertificatesVolumeSource() v1.VolumeSource {
return v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.CACertAndKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: kubeadmconstants.CACertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: kubeadmconstants.CAKeyName,
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.ServiceAccountKeyBaseName,
},
Items: []v1.KeyToPath{
{
Key: v1.TLSPrivateKeyKey,
Path: kubeadmconstants.ServiceAccountPrivateKeyName,
},
},
},
},
},
},
}
}
func kubeConfigVolumeSource(kubeconfigSecretName string) v1.VolumeSource {
return v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: kubeconfigSecretName,
},
}
}
func uploadTLSSecrets(client clientset.Interface, certDir string) error {
for _, tlsKeyPair := range getTLSKeyPairs() {
secret, err := createTLSSecretFromFiles(
tlsKeyPair.name,
filepath.Join(certDir, tlsKeyPair.cert),
filepath.Join(certDir, tlsKeyPair.key),
)
if err != nil {
return err
}
if err := apiclient.CreateOrUpdateSecret(client, secret); err != nil {
return err
}
fmt.Printf("[self-hosted] Created TLS secret %q from %s and %s\n", tlsKeyPair.name, tlsKeyPair.cert, tlsKeyPair.key)
}
return nil
}
func uploadKubeConfigSecrets(client clientset.Interface, kubeConfigDir string) error {
files := []string{
kubeadmconstants.SchedulerKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
}
for _, file := range files {
kubeConfigPath := filepath.Join(kubeConfigDir, file)
secret, err := createOpaqueSecretFromFile(file, kubeConfigPath)
if err != nil {
return err
}
if err := apiclient.CreateOrUpdateSecret(client, secret); err != nil {
return err
}
fmt.Printf("[self-hosted] Created secret for kubeconfig file %q\n", file)
}
return nil
}
func createTLSSecretFromFiles(secretName, crt, key string) (*v1.Secret, error) {
crtBytes, err := ioutil.ReadFile(crt)
if err != nil {
return nil, err
}
keyBytes, err := ioutil.ReadFile(key)
if err != nil {
return nil, err
}
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: metav1.NamespaceSystem,
},
Type: v1.SecretTypeTLS,
Data: map[string][]byte{
v1.TLSCertKey: crtBytes,
v1.TLSPrivateKeyKey: keyBytes,
},
}, nil
}
func createOpaqueSecretFromFile(secretName, file string) (*v1.Secret, error) {
fileBytes, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: metav1.NamespaceSystem,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
filepath.Base(file): fileBytes,
},
}, nil
}
func getTLSKeyPairs() []*tlsKeyPair {
return []*tlsKeyPair{
{
name: kubeadmconstants.CACertAndKeyBaseName,
cert: kubeadmconstants.CACertName,
key: kubeadmconstants.CAKeyName,
},
{
name: kubeadmconstants.APIServerCertAndKeyBaseName,
cert: kubeadmconstants.APIServerCertName,
key: kubeadmconstants.APIServerKeyName,
},
{
name: kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName,
cert: kubeadmconstants.APIServerKubeletClientCertName,
key: kubeadmconstants.APIServerKubeletClientKeyName,
},
{
name: kubeadmconstants.ServiceAccountKeyBaseName,
cert: kubeadmconstants.ServiceAccountPublicKeyName,
key: kubeadmconstants.ServiceAccountPrivateKeyName,
},
{
name: kubeadmconstants.FrontProxyCACertAndKeyBaseName,
cert: kubeadmconstants.FrontProxyCACertName,
key: kubeadmconstants.FrontProxyCAKeyName,
},
{
name: kubeadmconstants.FrontProxyClientCertAndKeyBaseName,
cert: kubeadmconstants.FrontProxyClientCertName,
key: kubeadmconstants.FrontProxyClientKeyName,
},
}
}
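// exampleClusterCASecret is an illustrative sketch only (not part of the kubeadm flow): it shows how
// getTLSKeyPairs and createTLSSecretFromFiles combine inside uploadTLSSecrets, here building just the
// kubernetes.io/tls Secret for the cluster CA. certDir is a hypothetical certificates directory, such
// as the one passed in via cfg.CertificatesDir.
func exampleClusterCASecret(certDir string) (*v1.Secret, error) {
	// getTLSKeyPairs()[0] is the CA cert/key pair.
	caPair := getTLSKeyPairs()[0]
	return createTLSSecretFromFiles(
		caPair.name,
		filepath.Join(certDir, caPair.cert),
		filepath.Join(certDir, caPair.key),
	)
}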

View File

@ -0,0 +1,72 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selfhosting
import (
"io/ioutil"
"log"
"os"
"testing"
)
func createTemporaryFile(name string) *os.File {
content := []byte("foo")
tmpfile, err := ioutil.TempFile("", name)
if err != nil {
log.Fatal(err)
}
if _, err := tmpfile.Write(content); err != nil {
log.Fatal(err)
}
return tmpfile
}
func TestCreateTLSSecretFromFile(t *testing.T) {
tmpCert := createTemporaryFile("foo.crt")
defer os.Remove(tmpCert.Name())
tmpKey := createTemporaryFile("foo.key")
defer os.Remove(tmpKey.Name())
_, err := createTLSSecretFromFiles("foo", tmpCert.Name(), tmpKey.Name())
if err != nil {
t.Fatal(err)
}
if err := tmpCert.Close(); err != nil {
t.Fatal(err)
}
if err := tmpKey.Close(); err != nil {
t.Fatal(err)
}
}
func TestCreateOpaqueSecretFromFile(t *testing.T) {
tmpFile := createTemporaryFile("foo")
defer os.Remove(tmpFile.Name())
_, err := createOpaqueSecretFromFile("foo", tmpFile.Name())
if err != nil {
t.Fatal(err)
}
if err := tmpFile.Close(); err != nil {
t.Fatal(err)
}
}

View File

@ -0,0 +1,14 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,95 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"compute.go",
"configuration.go",
"health.go",
"policy.go",
"postupgrade.go",
"postupgrade_v18_19.go",
"prepull.go",
"selfhosted.go",
"staticpods.go",
"versiongetter.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/phases/addons/dns:go_default_library",
"//cmd/kubeadm/app/phases/addons/proxy:go_default_library",
"//cmd/kubeadm/app/phases/bootstraptoken/clusterinfo:go_default_library",
"//cmd/kubeadm/app/phases/bootstraptoken/node:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/controlplane:go_default_library",
"//cmd/kubeadm/app/phases/etcd:go_default_library",
"//cmd/kubeadm/app/phases/selfhosting:go_default_library",
"//cmd/kubeadm/app/phases/uploadconfig:go_default_library",
"//cmd/kubeadm/app/preflight:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/config:go_default_library",
"//cmd/kubeadm/app/util/dryrun:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/version:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"compute_test.go",
"policy_test.go",
"postupgrade_v18_19_test.go",
"prepull_test.go",
"staticpods_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//cmd/kubeadm/app/phases/controlplane:go_default_library",
"//cmd/kubeadm/app/phases/etcd:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

View File

@ -0,0 +1,290 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"strings"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/util/version"
)
// Upgrade defines a possible upgrade path from a current version to a new one
type Upgrade struct {
Description string
Before ClusterState
After ClusterState
}
// CanUpgradeKubelets returns whether an upgrade of any kubelet in the cluster is possible
func (u *Upgrade) CanUpgradeKubelets() bool {
// If there are multiple different versions now, an upgrade is possible (even if only for a subset of the nodes)
if len(u.Before.KubeletVersions) > 1 {
return true
}
// Don't report something available for upgrade if we don't know the current state
if len(u.Before.KubeletVersions) == 0 {
return false
}
// If the same version number exists both before and after, we don't have to upgrade it
_, sameVersionFound := u.Before.KubeletVersions[u.After.KubeVersion]
return !sameVersionFound
}
// ActiveDNSAddon returns the name of the DNS addon in use: CoreDNS if its feature gate is enabled, kube-dns otherwise
func ActiveDNSAddon(featureGates map[string]bool) string {
if features.Enabled(featureGates, features.CoreDNS) {
return kubeadmconstants.CoreDNS
}
return kubeadmconstants.KubeDNS
}
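// exampleActiveDNSAddon is an illustrative sketch only, using a hypothetical feature-gate map: it shows
// how ActiveDNSAddon selects the DNS addon name that GetAvailableUpgrades later passes to
// dns.GetDNSVersion.
func exampleActiveDNSAddon() {
	gates := map[string]bool{features.CoreDNS: true}
	fmt.Println(ActiveDNSAddon(gates)) // the CoreDNS addon name
	fmt.Println(ActiveDNSAddon(nil))   // falls back to the kube-dns addon name when the gate is not enabled
}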
// ClusterState describes the state of certain versions for a cluster
type ClusterState struct {
// KubeVersion describes the version of the Kubernetes API Server, Controller Manager, Scheduler and Proxy.
KubeVersion string
// DNSVersion describes the version of the DNS add-on (kube-dns or CoreDNS) image and manifest in use
DNSVersion string
// KubeadmVersion describes the version of the kubeadm CLI
KubeadmVersion string
// KubeletVersions is a map from a version number to the number of kubelets running that version in the cluster
KubeletVersions map[string]uint16
// EtcdVersion represents the version of etcd used in the cluster
EtcdVersion string
}
// GetAvailableUpgrades fetches all versions from the specified VersionGetter and computes which
// kinds of upgrades can be performed
func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesAllowed, rcUpgradesAllowed bool, cluster util.EtcdCluster, featureGates map[string]bool) ([]Upgrade, error) {
fmt.Println("[upgrade] Fetching available versions to upgrade to")
// Collect the upgrades kubeadm can do in this list
upgrades := []Upgrade{}
// Get the cluster version
clusterVersionStr, clusterVersion, err := versionGetterImpl.ClusterVersion()
if err != nil {
return nil, err
}
// Get current kubeadm CLI version
kubeadmVersionStr, kubeadmVersion, err := versionGetterImpl.KubeadmVersion()
if err != nil {
return nil, err
}
// Get and output the current latest stable version
stableVersionStr, stableVersion, err := versionGetterImpl.VersionFromCILabel("stable", "stable version")
if err != nil {
fmt.Printf("[upgrade/versions] WARNING: %v\n", err)
fmt.Println("[upgrade/versions] WARNING: Falling back to current kubeadm version as latest stable version")
stableVersionStr, stableVersion = kubeadmVersionStr, kubeadmVersion
}
// Get the kubelet versions in the cluster
kubeletVersions, err := versionGetterImpl.KubeletVersions()
if err != nil {
return nil, err
}
// Get current etcd version
etcdStatus, err := cluster.GetEtcdClusterStatus()
if err != nil {
return nil, err
}
// Construct a descriptor for the current state of the world
beforeState := ClusterState{
KubeVersion: clusterVersionStr,
DNSVersion: dns.GetDNSVersion(clusterVersion, ActiveDNSAddon(featureGates)),
KubeadmVersion: kubeadmVersionStr,
KubeletVersions: kubeletVersions,
EtcdVersion: etcdStatus.Version,
}
// Do a "dumb guess" that a new minor upgrade is available just because the latest stable version is higher than the cluster version
// This guess will be corrected once we know if there is a patch version available
canDoMinorUpgrade := clusterVersion.LessThan(stableVersion)
// A patch version doesn't exist if the cluster version is higher than or equal to the current stable version:
// in the case that a user is trying to upgrade from, let's say, v1.8.0-beta.2 to v1.8.0-rc.1 (given we support such upgrades experimentally),
// a stable-1.8 branch doesn't exist yet. Hence this check.
if patchVersionBranchExists(clusterVersion, stableVersion) {
currentBranch := getBranchFromVersion(clusterVersionStr)
versionLabel := fmt.Sprintf("stable-%s", currentBranch)
description := fmt.Sprintf("version in the v%s series", currentBranch)
// Get and output the latest patch version for the cluster branch
patchVersionStr, patchVersion, err := versionGetterImpl.VersionFromCILabel(versionLabel, description)
if err != nil {
fmt.Printf("[upgrade/versions] WARNING: %v\n", err)
} else {
// Check if a minor version upgrade is possible when a patch release exists
// It's only possible if the latest patch version is higher than the current patch version
// If that's the case, they must be on different branches => a newer minor version can be upgraded to
canDoMinorUpgrade = minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion)
// If the cluster version is lower than the newest patch version, we should inform about the possible upgrade
if patchUpgradePossible(clusterVersion, patchVersion) {
// The kubeadm version has to be upgraded to the latest patch version
newKubeadmVer := patchVersionStr
if kubeadmVersion.AtLeast(patchVersion) {
// In this case, the kubeadm CLI version is new enough. Don't display an update suggestion for kubeadm by making .NewKubeadmVersion equal .CurrentKubeadmVersion
newKubeadmVer = kubeadmVersionStr
}
upgrades = append(upgrades, Upgrade{
Description: description,
Before: beforeState,
After: ClusterState{
KubeVersion: patchVersionStr,
DNSVersion: dns.GetDNSVersion(patchVersion, ActiveDNSAddon(featureGates)),
KubeadmVersion: newKubeadmVer,
EtcdVersion: getSuggestedEtcdVersion(patchVersionStr),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
}
}
if canDoMinorUpgrade {
upgrades = append(upgrades, Upgrade{
Description: "stable version",
Before: beforeState,
After: ClusterState{
KubeVersion: stableVersionStr,
DNSVersion: dns.GetDNSVersion(stableVersion, ActiveDNSAddon(featureGates)),
KubeadmVersion: stableVersionStr,
EtcdVersion: getSuggestedEtcdVersion(stableVersionStr),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
if experimentalUpgradesAllowed || rcUpgradesAllowed {
// dl.k8s.io/release/latest.txt is ALWAYS an alpha.X version
// dl.k8s.io/release/latest-1.X.txt is first v1.X.0-alpha.0 -> v1.X.0-alpha.Y, then v1.X.0-beta.0 to v1.X.0-beta.Z, then v1.X.0-rc.1 to v1.X.0-rc.W.
// After the v1.X.0 release, latest-1.X.txt is always a beta.0 version. Let's say the latest stable version on the v1.7 branch is v1.7.3, then the
// latest-1.7 version is v1.7.4-beta.0
// Worth noting is that when the release-1.X branch is cut, there are two versions tagged: v1.X.0-beta.0 AND v1.(X+1).0-alpha.0
// The v1.(X+1).0-alpha.0 is pretty much useless and should just be ignored, as more betas may be released that have more features than the initial v1.(X+1).0-alpha.0
// So what we do below is get the latest overall version, always a v1.X.0-alpha.Y version. Then we get the latest-1.(X-1) version. This version may be anything
// between v1.(X-1).0-beta.0 and v1.(X-1).Z-beta.0. At some point in time, latest-1.(X-1) will point to v1.(X-1).0-rc.1. Then we should show it.
// The flow looks like this (with time on the X axis):
// v1.8.0-alpha.1 -> v1.8.0-alpha.2 -> v1.8.0-alpha.3 | release-1.8 branch | v1.8.0-beta.0 -> v1.8.0-beta.1 -> v1.8.0-beta.2 -> v1.8.0-rc.1 -> v1.8.0 -> v1.8.1
// v1.9.0-alpha.0 -> v1.9.0-alpha.1 -> v1.9.0-alpha.2
// Get and output the current latest unstable version
latestVersionStr, latestVersion, err := versionGetterImpl.VersionFromCILabel("latest", "experimental version")
if err != nil {
return nil, err
}
minorUnstable := latestVersion.Components()[1]
// Get and output the current latest unstable version
previousBranch := fmt.Sprintf("latest-1.%d", minorUnstable-1)
previousBranchLatestVersionStr, previousBranchLatestVersion, err := versionGetterImpl.VersionFromCILabel(previousBranch, "")
if err != nil {
return nil, err
}
// If that previous latest version is an RC, RCs are allowed and the cluster version is lower than the RC version, show the upgrade
if rcUpgradesAllowed && rcUpgradePossible(clusterVersion, previousBranchLatestVersion) {
upgrades = append(upgrades, Upgrade{
Description: "release candidate version",
Before: beforeState,
After: ClusterState{
KubeVersion: previousBranchLatestVersionStr,
DNSVersion: dns.GetDNSVersion(previousBranchLatestVersion, ActiveDNSAddon(featureGates)),
KubeadmVersion: previousBranchLatestVersionStr,
EtcdVersion: getSuggestedEtcdVersion(previousBranchLatestVersionStr),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
// Show the possibility if experimental upgrades are allowed
if experimentalUpgradesAllowed && clusterVersion.LessThan(latestVersion) {
// Default to assume that the experimental version to show is the unstable one
unstableKubeVersion := latestVersionStr
unstableKubeDNSVersion := dns.GetDNSVersion(latestVersion, ActiveDNSAddon(featureGates))
// We should not display alpha.0. The previous branch's beta/rc versions are more relevant due to how the kube branching process works.
if latestVersion.PreRelease() == "alpha.0" {
unstableKubeVersion = previousBranchLatestVersionStr
unstableKubeDNSVersion = dns.GetDNSVersion(previousBranchLatestVersion, ActiveDNSAddon(featureGates))
}
upgrades = append(upgrades, Upgrade{
Description: "experimental version",
Before: beforeState,
After: ClusterState{
KubeVersion: unstableKubeVersion,
DNSVersion: unstableKubeDNSVersion,
KubeadmVersion: unstableKubeVersion,
EtcdVersion: getSuggestedEtcdVersion(unstableKubeVersion),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
}
// Add a newline at the end of this output to leave some space before the next output section
fmt.Println("")
return upgrades, nil
}
func getBranchFromVersion(version string) string {
return strings.TrimPrefix(version, "v")[:3]
}
func patchVersionBranchExists(clusterVersion, stableVersion *version.Version) bool {
return stableVersion.AtLeast(clusterVersion)
}
func patchUpgradePossible(clusterVersion, patchVersion *version.Version) bool {
return clusterVersion.LessThan(patchVersion)
}
func rcUpgradePossible(clusterVersion, previousBranchLatestVersion *version.Version) bool {
return strings.HasPrefix(previousBranchLatestVersion.PreRelease(), "rc") && clusterVersion.LessThan(previousBranchLatestVersion)
}
func minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion *version.Version) bool {
return patchVersion.LessThan(stableVersion)
}
func getSuggestedEtcdVersion(kubernetesVersion string) string {
etcdVersion, err := kubeadmconstants.EtcdSupportedVersion(kubernetesVersion)
if err != nil {
fmt.Printf("[upgrade/versions] WARNING: No recommended etcd for requested kubernetes version (%s)\n", kubernetesVersion)
return "N/A"
}
return etcdVersion.String()
}
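// exampleUpgradeChecks is an illustrative sketch only, tying together the helper predicates above with
// hypothetical version numbers: a cluster at v1.8.1 whose branch has v1.8.3 as its latest patch release,
// while v1.9.0 is the latest stable release, is offered both a patch upgrade and a minor upgrade.
func exampleUpgradeChecks() {
	clusterVersion := version.MustParseSemantic("v1.8.1")
	patchVersion := version.MustParseSemantic("v1.8.3")
	stableVersion := version.MustParseSemantic("v1.9.0")

	fmt.Println(patchUpgradePossible(clusterVersion, patchVersion))                // true: v1.8.1 < v1.8.3
	fmt.Println(minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion)) // true: v1.8.3 < v1.9.0
	fmt.Println(getSuggestedEtcdVersion("v1.9.0"))                                 // the etcd version kubeadm recommends for v1.9
}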

View File

@ -0,0 +1,515 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"github.com/coreos/etcd/clientv3"
versionutil "k8s.io/kubernetes/pkg/util/version"
"reflect"
"testing"
)
type fakeVersionGetter struct {
clusterVersion, kubeadmVersion, stableVersion, latestVersion, latestDevBranchVersion, stablePatchVersion, kubeletVersion string
}
var _ VersionGetter = &fakeVersionGetter{}
// ClusterVersion gets a fake API server version
func (f *fakeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
return f.clusterVersion, versionutil.MustParseSemantic(f.clusterVersion), nil
}
// KubeadmVersion gets a fake kubeadm version
func (f *fakeVersionGetter) KubeadmVersion() (string, *versionutil.Version, error) {
return f.kubeadmVersion, versionutil.MustParseSemantic(f.kubeadmVersion), nil
}
// VersionFromCILabel gets fake latest versions from CI
func (f *fakeVersionGetter) VersionFromCILabel(ciVersionLabel, _ string) (string, *versionutil.Version, error) {
if ciVersionLabel == "stable" {
return f.stableVersion, versionutil.MustParseSemantic(f.stableVersion), nil
}
if ciVersionLabel == "latest" {
return f.latestVersion, versionutil.MustParseSemantic(f.latestVersion), nil
}
if ciVersionLabel == "latest-1.9" {
return f.latestDevBranchVersion, versionutil.MustParseSemantic(f.latestDevBranchVersion), nil
}
return f.stablePatchVersion, versionutil.MustParseSemantic(f.stablePatchVersion), nil
}
// KubeletVersions gets the versions of the kubelets in the cluster
func (f *fakeVersionGetter) KubeletVersions() (map[string]uint16, error) {
return map[string]uint16{
f.kubeletVersion: 1,
}, nil
}
type fakeEtcdCluster struct{}
func (f fakeEtcdCluster) GetEtcdClusterStatus() (*clientv3.StatusResponse, error) {
client := &clientv3.StatusResponse{}
client.Version = "3.0.14"
return client, nil
}
func TestGetAvailableUpgrades(t *testing.T) {
featureGates := make(map[string]bool)
tests := []struct {
vg *fakeVersionGetter
expectedUpgrades []Upgrade
allowExperimental, allowRCs bool
errExpected bool
}{
{ // no action needed, already up-to-date
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
stablePatchVersion: "v1.8.3",
stableVersion: "v1.8.3",
},
expectedUpgrades: []Upgrade{},
allowExperimental: false,
errExpected: false,
},
{ // simple patch version upgrade
vg: &fakeVersionGetter{
clusterVersion: "v1.8.1",
kubeletVersion: "v1.8.1", // the kubelets are on the same version as the control plane
kubeadmVersion: "v1.8.2",
stablePatchVersion: "v1.8.3",
stableVersion: "v1.8.3",
},
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.8 series",
Before: ClusterState{
KubeVersion: "v1.8.1",
KubeletVersions: map[string]uint16{
"v1.8.1": 1,
},
KubeadmVersion: "v1.8.2",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.8.3",
KubeadmVersion: "v1.8.3",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
},
},
},
allowExperimental: false,
errExpected: false,
},
{ // minor version upgrade only
vg: &fakeVersionGetter{
clusterVersion: "v1.8.1",
kubeletVersion: "v1.8.1", // the kubelets are on the same version as the control plane
kubeadmVersion: "v1.9.0",
stablePatchVersion: "v1.8.1",
stableVersion: "v1.9.0",
},
expectedUpgrades: []Upgrade{
{
Description: "stable version",
Before: ClusterState{
KubeVersion: "v1.8.1",
KubeletVersions: map[string]uint16{
"v1.8.1": 1,
},
KubeadmVersion: "v1.9.0",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.0",
KubeadmVersion: "v1.9.0",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowExperimental: false,
errExpected: false,
},
{ // both minor version upgrade and patch version upgrade available
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3", // the kubelets are on the same version as the control plane
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.9.1",
},
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.8 series",
Before: ClusterState{
KubeVersion: "v1.8.3",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.8.5",
KubeadmVersion: "v1.8.5", // Note: The kubeadm version mustn't be "downgraded" here
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
},
},
{
Description: "stable version",
Before: ClusterState{
KubeVersion: "v1.8.3",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.1",
KubeadmVersion: "v1.9.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowExperimental: false,
errExpected: false,
},
{ // allow experimental upgrades, but no upgrade available
vg: &fakeVersionGetter{
clusterVersion: "v1.9.0-alpha.2",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestVersion: "v1.9.0-alpha.2",
},
expectedUpgrades: []Upgrade{},
allowExperimental: true,
errExpected: false,
},
{ // upgrade to an unstable version should be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestVersion: "v1.9.0-alpha.2",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.0-alpha.2",
KubeadmVersion: "v1.9.0-alpha.2",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowExperimental: true,
errExpected: false,
},
{ // upgrade from an unstable version to an unstable version should be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.9.0-alpha.1",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestVersion: "v1.9.0-alpha.2",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.9.0-alpha.1",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.7",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.0-alpha.2",
KubeadmVersion: "v1.9.0-alpha.2",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowExperimental: true,
errExpected: false,
},
{ // v1.X.0-alpha.0 should be ignored
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.0-beta.1",
latestVersion: "v1.10.0-alpha.0",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.0-beta.1",
KubeadmVersion: "v1.9.0-beta.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowExperimental: true,
errExpected: false,
},
{ // upgrade to an RC version should be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.0-rc.1",
latestVersion: "v1.10.0-alpha.1",
},
expectedUpgrades: []Upgrade{
{
Description: "release candidate version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.0-rc.1",
KubeadmVersion: "v1.9.0-rc.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowRCs: true,
errExpected: false,
},
{ // it is possible (but very uncommon) that the latest version from the previous branch is an rc and the current latest version is alpha.0. In that case, show the RC
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.6-rc.1",
latestVersion: "v1.10.1-alpha.0",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version", // Note that this is considered an experimental version in this uncommon scenario
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.6-rc.1",
KubeadmVersion: "v1.9.6-rc.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowExperimental: true,
errExpected: false,
},
{ // upgrade to an RC version should be supported. There may also be an even newer unstable version.
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.0-rc.1",
latestVersion: "v1.10.0-alpha.2",
},
expectedUpgrades: []Upgrade{
{
Description: "release candidate version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.9.0-rc.1",
KubeadmVersion: "v1.9.0-rc.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
},
After: ClusterState{
KubeVersion: "v1.10.0-alpha.2",
KubeadmVersion: "v1.10.0-alpha.2",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
},
},
},
allowRCs: true,
allowExperimental: true,
errExpected: false,
},
}
	// Instantiate a fake etcd cluster so that the etcd version for the corresponding
	// Kubernetes release can be looked up.
testCluster := fakeEtcdCluster{}
for _, rt := range tests {
actualUpgrades, actualErr := GetAvailableUpgrades(rt.vg, rt.allowExperimental, rt.allowRCs, testCluster, featureGates)
if !reflect.DeepEqual(actualUpgrades, rt.expectedUpgrades) {
t.Errorf("failed TestGetAvailableUpgrades\n\texpected upgrades: %v\n\tgot: %v", rt.expectedUpgrades, actualUpgrades)
}
if (actualErr != nil) != rt.errExpected {
t.Errorf("failed TestGetAvailableUpgrades\n\texpected error: %t\n\tgot error: %t", rt.errExpected, (actualErr != nil))
}
}
}
func TestKubeletUpgrade(t *testing.T) {
tests := []struct {
before map[string]uint16
after string
expected bool
}{
{ // upgrade available
before: map[string]uint16{
"v1.7.1": 1,
},
after: "v1.7.3",
expected: true,
},
{ // upgrade available
before: map[string]uint16{
"v1.7.1": 1,
"v1.7.3": 100,
},
after: "v1.7.3",
expected: true,
},
{ // upgrade not available
before: map[string]uint16{
"v1.7.3": 1,
},
after: "v1.7.3",
expected: false,
},
{ // upgrade not available
before: map[string]uint16{
"v1.7.3": 100,
},
after: "v1.7.3",
expected: false,
},
{ // upgrade not available if we don't know anything about the earlier state
before: map[string]uint16{},
after: "v1.7.3",
expected: false,
},
}
for _, rt := range tests {
upgrade := Upgrade{
Before: ClusterState{
KubeletVersions: rt.before,
},
After: ClusterState{
KubeVersion: rt.after,
},
}
actual := upgrade.CanUpgradeKubelets()
if actual != rt.expected {
t.Errorf("failed TestKubeletUpgrade\n\texpected: %t\n\tgot: %t\n\ttest object: %v", rt.expected, actual, upgrade)
}
}
}
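A minimal illustrative sketch (not from the original source) of the behavior the test table above pins down for CanUpgradeKubelets, assuming it simply reports whether any known kubelet runs a version other than the upgrade target; the real implementation in this package may differ in detail.
func canUpgradeKubeletsSketch(kubeletVersions map[string]uint16, target string) bool {
	// With no information about the kubelets, no upgrade is offered.
	if len(kubeletVersions) == 0 {
		return false
	}
	// An upgrade is available as soon as one kubelet runs something other than the target version.
	for v := range kubeletVersions {
		if v != target {
			return true
		}
	}
	return false
}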

View File

@ -0,0 +1,107 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"io"
"io/ioutil"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// FetchConfiguration fetches configuration required for upgrading your cluster from a file (which has precedence) or a ConfigMap in the cluster
func FetchConfiguration(client clientset.Interface, w io.Writer, cfgPath string) (*kubeadmapiext.MasterConfiguration, error) {
fmt.Println("[upgrade/config] Making sure the configuration is correct:")
// Load the configuration from a file or the cluster
configBytes, err := loadConfigurationBytes(client, w, cfgPath)
if err != nil {
return nil, err
}
// Take the versioned configuration populated from the configmap, default it and validate
	// Return the defaulted and validated external version of the API object
versionedcfg, err := bytesToValidatedMasterConfig(configBytes)
if err != nil {
return nil, fmt.Errorf("could not decode configuration: %v", err)
}
return versionedcfg, nil
}
// loadConfigurationBytes loads the configuration byte slice from either a file or the cluster ConfigMap
func loadConfigurationBytes(client clientset.Interface, w io.Writer, cfgPath string) ([]byte, error) {
if cfgPath != "" {
fmt.Printf("[upgrade/config] Reading configuration options from a file: %s\n", cfgPath)
return ioutil.ReadFile(cfgPath)
}
fmt.Println("[upgrade/config] Reading configuration from the cluster...")
configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.MasterConfigurationConfigMap, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem)
fmt.Println("[upgrade/config] Without this information, 'kubeadm upgrade' won't know how to configure your upgraded cluster.")
fmt.Println("")
fmt.Println("[upgrade/config] Next steps:")
fmt.Printf("\t- OPTION 1: Run 'kubeadm config upload from-flags' and specify the same CLI arguments you passed to 'kubeadm init' when you created your master.\n")
fmt.Printf("\t- OPTION 2: Run 'kubeadm config upload from-file' and specify the same config file you passed to 'kubeadm init' when you created your master.\n")
fmt.Printf("\t- OPTION 3: Pass a config file to 'kubeadm upgrade' using the --config flag.\n")
fmt.Println("")
return []byte{}, fmt.Errorf("the ConfigMap %q in the %s namespace used for getting configuration information was not found", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem)
} else if err != nil {
return []byte{}, fmt.Errorf("an unexpected error happened when trying to get the ConfigMap %q in the %s namespace: %v", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem, err)
}
fmt.Printf("[upgrade/config] FYI: You can look at this config file with 'kubectl -n %s get cm %s -oyaml'\n", metav1.NamespaceSystem, constants.MasterConfigurationConfigMap)
return []byte(configMap.Data[constants.MasterConfigurationConfigMapKey]), nil
}
// bytesToValidatedMasterConfig converts a byte array to an external, defaulted and validated configuration object
func bytesToValidatedMasterConfig(b []byte) (*kubeadmapiext.MasterConfiguration, error) {
cfg := &kubeadmapiext.MasterConfiguration{}
finalCfg := &kubeadmapiext.MasterConfiguration{}
internalcfg := &kubeadmapi.MasterConfiguration{}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), b, cfg); err != nil {
return nil, fmt.Errorf("unable to decode config from bytes: %v", err)
}
// Default and convert to the internal version
legacyscheme.Scheme.Default(cfg)
legacyscheme.Scheme.Convert(cfg, internalcfg, nil)
// Applies dynamic defaults to settings not provided with flags
if err := configutil.SetInitDynamicDefaults(internalcfg); err != nil {
return nil, err
}
// Validates cfg (flags/configs + defaults + dynamic defaults)
if err := validation.ValidateMasterConfiguration(internalcfg).ToAggregate(); err != nil {
return nil, err
}
// Finally converts back to the external version
legacyscheme.Scheme.Convert(internalcfg, finalCfg, nil)
return finalCfg, nil
}
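An illustrative sketch (not from the original source) of how an upgrade command might call FetchConfiguration defined above; the client and upgradeCfgPath variables, the enclosing function, and the use of os.Stdout as the writer are assumptions.
	cfg, err := FetchConfiguration(client, os.Stdout, upgradeCfgPath)
	if err != nil {
		return fmt.Errorf("[upgrade/config] FATAL: %v", err)
	}
	// The fetched, defaulted and validated configuration can then drive the upgrade.
	fmt.Printf("[upgrade/config] Configured KubernetesVersion: %s\n", cfg.KubernetesVersion)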

View File

@ -0,0 +1,214 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"net/http"
"os"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
)
// healthCheck is a helper struct for easily performing healthchecks against the cluster and printing the output
type healthCheck struct {
name string
client clientset.Interface
// f is invoked with a k8s client passed to it; it should return an error if the check fails
f func(clientset.Interface) error
}
// Check is part of the preflight.Checker interface
func (c *healthCheck) Check() (warnings, errors []error) {
if err := c.f(c.client); err != nil {
return nil, []error{err}
}
return nil, nil
}
// Name is part of the preflight.Checker interface
func (c *healthCheck) Name() string {
return c.name
}
// CheckClusterHealth makes sure:
// - the API /healthz endpoint is healthy
// - all master Nodes are Ready
// - (if self-hosted) that there are DaemonSets with at least one Pod for all control plane components
// - (if static pod-hosted) that all required Static Pod manifests exist on disk
func CheckClusterHealth(client clientset.Interface, ignoreChecksErrors sets.String) error {
fmt.Println("[upgrade] Making sure the cluster is healthy:")
healthChecks := []preflight.Checker{
&healthCheck{
name: "APIServerHealth",
client: client,
f: apiServerHealthy,
},
&healthCheck{
name: "MasterNodesReady",
client: client,
f: masterNodesReady,
},
// TODO: Add a check for ComponentStatuses here?
}
// Run slightly different health checks depending on control plane hosting type
if IsControlPlaneSelfHosted(client) {
healthChecks = append(healthChecks, &healthCheck{
name: "ControlPlaneHealth",
client: client,
f: controlPlaneHealth,
})
} else {
healthChecks = append(healthChecks, &healthCheck{
name: "StaticPodManifest",
client: client,
f: staticPodManifestHealth,
})
}
return preflight.RunChecks(healthChecks, os.Stderr, ignoreChecksErrors)
}
// apiServerHealthy checks whether the API server's /healthz endpoint is healthy
func apiServerHealthy(client clientset.Interface) error {
healthStatus := 0
// If client.Discovery().RESTClient() is nil, the fake client is used, and that means we are dry-running. Just proceed
if client.Discovery().RESTClient() == nil {
return nil
}
client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
return fmt.Errorf("the API Server is unhealthy; /healthz didn't return %q", "ok")
}
return nil
}
// masterNodesReady checks whether all master Nodes in the cluster are in the Ready state
func masterNodesReady(client clientset.Interface) error {
selector := labels.SelectorFromSet(labels.Set(map[string]string{
constants.LabelNodeRoleMaster: "",
}))
masters, err := client.CoreV1().Nodes().List(metav1.ListOptions{
LabelSelector: selector.String(),
})
if err != nil {
return fmt.Errorf("couldn't list masters in cluster: %v", err)
}
if len(masters.Items) == 0 {
return fmt.Errorf("failed to find any nodes with master role")
}
notReadyMasters := getNotReadyNodes(masters.Items)
if len(notReadyMasters) != 0 {
return fmt.Errorf("there are NotReady masters in the cluster: %v", notReadyMasters)
}
return nil
}
// controlPlaneHealth ensures all control plane DaemonSets are healthy
func controlPlaneHealth(client clientset.Interface) error {
notReadyDaemonSets, err := getNotReadyDaemonSets(client)
if err != nil {
return err
}
if len(notReadyDaemonSets) != 0 {
return fmt.Errorf("there are control plane DaemonSets in the cluster that are not ready: %v", notReadyDaemonSets)
}
return nil
}
// staticPodManifestHealth makes sure the required static pod manifests are present on disk
func staticPodManifestHealth(_ clientset.Interface) error {
nonExistentManifests := []string{}
for _, component := range constants.MasterComponents {
manifestFile := constants.GetStaticPodFilepath(component, constants.GetStaticPodDirectory())
if _, err := os.Stat(manifestFile); os.IsNotExist(err) {
nonExistentManifests = append(nonExistentManifests, manifestFile)
}
}
if len(nonExistentManifests) == 0 {
return nil
}
return fmt.Errorf("The control plane seems to be Static Pod-hosted, but some of the manifests don't seem to exist on disk. This probably means you're running 'kubeadm upgrade' on a remote machine, which is not supported for a Static Pod-hosted cluster. Manifest files not found: %v", nonExistentManifests)
}
// IsControlPlaneSelfHosted returns whether the control plane is self hosted or not
func IsControlPlaneSelfHosted(client clientset.Interface) bool {
notReadyDaemonSets, err := getNotReadyDaemonSets(client)
if err != nil {
return false
}
// If there are no NotReady DaemonSets, we are using self-hosting
return len(notReadyDaemonSets) == 0
}
// getNotReadyDaemonSets returns an error for each control plane DaemonSet that is not Ready
func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) {
notReadyDaemonSets := []error{}
for _, component := range constants.MasterComponents {
dsName := constants.AddSelfHostedPrefix(component)
ds, err := client.AppsV1beta2().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("couldn't get daemonset %q in the %s namespace", dsName, metav1.NamespaceSystem)
}
if err := daemonSetHealth(&ds.Status); err != nil {
notReadyDaemonSets = append(notReadyDaemonSets, fmt.Errorf("DaemonSet %q not healthy: %v", dsName, err))
}
}
return notReadyDaemonSets, nil
}
// daemonSetHealth is a helper function for getting the health of a DaemonSet's status
func daemonSetHealth(dsStatus *apps.DaemonSetStatus) error {
if dsStatus.CurrentNumberScheduled != dsStatus.DesiredNumberScheduled {
return fmt.Errorf("current number of scheduled Pods ('%d') doesn't match the amount of desired Pods ('%d')", dsStatus.CurrentNumberScheduled, dsStatus.DesiredNumberScheduled)
}
if dsStatus.NumberAvailable == 0 {
return fmt.Errorf("no available Pods for DaemonSet")
}
if dsStatus.NumberReady == 0 {
return fmt.Errorf("no ready Pods for DaemonSet")
}
return nil
}
// getNotReadyNodes returns a string slice of nodes in the cluster that are NotReady
func getNotReadyNodes(nodes []v1.Node) []string {
notReadyNodes := []string{}
for _, node := range nodes {
for _, condition := range node.Status.Conditions {
if condition.Type == v1.NodeReady && condition.Status != v1.ConditionTrue {
notReadyNodes = append(notReadyNodes, node.ObjectMeta.Name)
}
}
}
return notReadyNodes
}
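An illustrative sketch (not from the original source) of calling CheckClusterHealth defined above; the client variable and the choice to skip the ControlPlaneHealth check are assumptions, and sets.NewString is assumed from the apimachinery sets package already imported in this file.
	ignored := sets.NewString("ControlPlaneHealth") // skip the self-hosted DaemonSet check
	if err := CheckClusterHealth(client, ignored); err != nil {
		return fmt.Errorf("[upgrade/health] FATAL: %v", err)
	}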

View File

@ -0,0 +1,179 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"strings"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version"
)
const (
// MaximumAllowedMinorVersionUpgradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go
MaximumAllowedMinorVersionUpgradeSkew = 1
// MaximumAllowedMinorVersionDowngradeSkew describes how many minor versions kubeadm can downgrade the control plane version in one go
MaximumAllowedMinorVersionDowngradeSkew = 1
// MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster
MaximumAllowedMinorVersionKubeletSkew = 1
)
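// For example, with an upgrade skew of 1 a v1.8.x control plane can be upgraded to v1.9.y in a
// single run, while a direct v1.8.x -> v1.10.z jump exceeds the allowed skew; likewise, kubelets
// may lag at most one minor version behind the control plane version.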
// VersionSkewPolicyErrors describes version skew errors that might be seen during the validation process in EnforceVersionPolicies
type VersionSkewPolicyErrors struct {
Mandatory []error
Skippable []error
}
// EnforceVersionPolicies enforces that the proposed new version is compatible with all the different version skew policies
func EnforceVersionPolicies(versionGetter VersionGetter, newK8sVersionStr string, newK8sVersion *version.Version, allowExperimentalUpgrades, allowRCUpgrades bool) *VersionSkewPolicyErrors {
skewErrors := &VersionSkewPolicyErrors{
Mandatory: []error{},
Skippable: []error{},
}
clusterVersionStr, clusterVersion, err := versionGetter.ClusterVersion()
if err != nil {
// This case can't be forced: kubeadm has to be able to lookup cluster version for upgrades to work
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Unable to fetch cluster version: %v", err))
return skewErrors
}
kubeadmVersionStr, kubeadmVersion, err := versionGetter.KubeadmVersion()
if err != nil {
// This case can't be forced: kubeadm has to be able to lookup its version for upgrades to work
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Unable to fetch kubeadm version: %v", err))
return skewErrors
}
kubeletVersions, err := versionGetter.KubeletVersions()
if err != nil {
// This is a non-critical error; continue although kubeadm couldn't look this up
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Unable to fetch kubelet version: %v", err))
}
// Make sure the new version is a supported version (higher than the minimum one supported)
if constants.MinimumControlPlaneVersion.AtLeast(newK8sVersion) {
// This must not happen, kubeadm always supports a minimum version; and we can't go below that
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the minimum supported version %q. Please specify a higher version to upgrade to", newK8sVersionStr, clusterVersionStr))
}
// kubeadm doesn't support upgrades that skip a minor version; e.g. a v1.7 -> v1.9 upgrade is not supported in one go
if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
tooLargeUpgradeSkewErr := fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew)
// If the version that we're about to upgrade to is a released version, we should fully enforce this policy
// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeUpgradeSkewErr)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeUpgradeSkewErr)
}
}
// kubeadm doesn't support downgrades that skip a minor version; e.g. a v1.9 -> v1.7 downgrade is not supported in one go
if newK8sVersion.Minor() < clusterVersion.Minor()-MaximumAllowedMinorVersionDowngradeSkew {
tooLargeDowngradeSkewErr := fmt.Errorf("Specified version to downgrade to %q is too low; kubeadm can downgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionDowngradeSkew)
// If the version that we're about to downgrade to is a released version, we should fully enforce this policy
// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeDowngradeSkewErr)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeDowngradeSkewErr)
}
}
// If the kubeadm version is lower than what we want to upgrade to; error
if kubeadmVersion.LessThan(newK8sVersion) {
if newK8sVersion.Minor() > kubeadmVersion.Minor() {
tooLargeKubeadmSkew := fmt.Errorf("Specified version to upgrade to %q is at least one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor())
// This is unsupported; kubeadm has no idea how it should handle a newer minor release than itself
// If the version is a CI/dev/experimental version though, lower the severity of this check, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeKubeadmSkew)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeKubeadmSkew)
}
} else {
// Upgrading to a higher patch version than kubeadm is ok if the user specifies --force. Not recommended, but possible.
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is higher than the kubeadm version %q. Upgrade kubeadm first using the tool you used to install kubeadm", newK8sVersionStr, kubeadmVersionStr))
}
}
// Detect if the version is unstable and the user didn't allow that
if err = detectUnstableVersionError(newK8sVersion, newK8sVersionStr, allowExperimentalUpgrades, allowRCUpgrades); err != nil {
skewErrors.Skippable = append(skewErrors.Skippable, err)
}
// Detect if there are too old kubelets in the cluster
// Check for nil here, since kubeletVersions can only be nil if KubeletVersions() returned an error above.
// In that case it's okay to skip this check.
if kubeletVersions != nil {
if err = detectTooOldKubelets(newK8sVersion, kubeletVersions); err != nil {
skewErrors.Skippable = append(skewErrors.Skippable, err)
}
}
// If we did not see any errors, return nil
if len(skewErrors.Skippable) == 0 && len(skewErrors.Mandatory) == 0 {
return nil
}
// Uh oh, we encountered one or more errors, return them
return skewErrors
}
// detectUnstableVersionError is a helper function for detecting if the unstable version (if specified) is allowed to be used
func detectUnstableVersionError(newK8sVersion *version.Version, newK8sVersionStr string, allowExperimentalUpgrades, allowRCUpgrades bool) error {
// Short-circuit quickly if this is not an unstable version
if len(newK8sVersion.PreRelease()) == 0 {
return nil
}
// If the user has specified that unstable versions are fine, then no error should be returned
if allowExperimentalUpgrades {
return nil
}
// If this is a release candidate and we allow such ones, everything's fine
if strings.HasPrefix(newK8sVersion.PreRelease(), "rc") && allowRCUpgrades {
return nil
}
return fmt.Errorf("Specified version to upgrade to %q is an unstable version and such upgrades weren't allowed via setting the --allow-*-upgrades flags", newK8sVersionStr)
}
// detectTooOldKubelets errors out if the kubelet versions are so old that an unsupported skew would happen if the cluster was upgraded
func detectTooOldKubelets(newK8sVersion *version.Version, kubeletVersions map[string]uint16) error {
tooOldKubeletVersions := []string{}
for versionStr := range kubeletVersions {
kubeletVersion, err := version.ParseSemantic(versionStr)
if err != nil {
return fmt.Errorf("couldn't parse kubelet version %s", versionStr)
}
if newK8sVersion.Minor() > kubeletVersion.Minor()+MaximumAllowedMinorVersionKubeletSkew {
tooOldKubeletVersions = append(tooOldKubeletVersions, versionStr)
}
}
if len(tooOldKubeletVersions) == 0 {
return nil
}
return fmt.Errorf("There are kubelets in this cluster that are too old that have these versions %v", tooOldKubeletVersions)
}
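An illustrative sketch (not from the original source) of how a caller might consume EnforceVersionPolicies defined above; the versionGetter variable, the chosen target version, and the printed prefixes are assumptions.
	newVerStr := "v1.9.1"
	newVer := version.MustParseSemantic(newVerStr)
	if skew := EnforceVersionPolicies(versionGetter, newVerStr, newVer, false, false); skew != nil {
		// Mandatory errors always block the upgrade; skippable ones can be overridden with -f.
		for _, err := range skew.Mandatory {
			fmt.Printf("[upgrade/version] FATAL: %v\n", err)
		}
		for _, err := range skew.Skippable {
			fmt.Printf("[upgrade/version] SKIPPABLE (use -f to force): %v\n", err)
		}
	}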

View File

@ -0,0 +1,192 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"testing"
"k8s.io/kubernetes/pkg/util/version"
)
func TestEnforceVersionPolicies(t *testing.T) {
tests := []struct {
vg *fakeVersionGetter
expectedMandatoryErrs int
expectedSkippableErrs int
allowExperimental, allowRCs bool
newK8sVersion string
}{
{ // everything ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.5",
},
newK8sVersion: "v1.8.5",
},
{ // everything ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.2",
kubeadmVersion: "v1.9.1",
},
newK8sVersion: "v1.9.0",
},
{ // downgrades ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
},
newK8sVersion: "v1.8.2",
},
{ // upgrades without bumping the version number ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
},
newK8sVersion: "v1.8.3",
},
{ // new version must be higher than v1.8.0
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
},
newK8sVersion: "v1.7.10",
expectedMandatoryErrs: 1, // version must be higher than v1.8.0
},
{ // upgrading two minor versions in one go is not supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.10.0",
},
newK8sVersion: "v1.10.0",
expectedMandatoryErrs: 1, // can't upgrade two minor versions
expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
},
{ // downgrading two minor versions in one go is not supported
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.0",
},
newK8sVersion: "v1.8.3",
expectedMandatoryErrs: 1, // can't downgrade two minor versions
},
{ // kubeadm version must be higher than the new kube version. However, patch version skews may be forced
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
},
newK8sVersion: "v1.8.5",
expectedSkippableErrs: 1,
},
{ // kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
},
newK8sVersion: "v1.9.0",
expectedMandatoryErrs: 1,
},
{ // the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.7.8",
kubeadmVersion: "v1.9.0",
},
newK8sVersion: "v1.9.0",
expectedSkippableErrs: 1,
},
{ // experimental upgrades supported if the flag is set
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-beta.1",
},
newK8sVersion: "v1.9.0-beta.1",
allowExperimental: true,
},
{ // release candidate upgrades supported if the flag is set
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-rc.1",
},
newK8sVersion: "v1.9.0-rc.1",
allowRCs: true,
},
{ // release candidate upgrades supported if the flag is set
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-rc.1",
},
newK8sVersion: "v1.9.0-rc.1",
allowExperimental: true,
},
{ // the user should not be able to upgrade to an experimental version if they haven't opted into that
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-beta.1",
},
newK8sVersion: "v1.9.0-beta.1",
allowRCs: true,
expectedSkippableErrs: 1,
},
{ // the user should not be able to upgrade to a release candidate version if they haven't opted into that
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-rc.1",
},
newK8sVersion: "v1.9.0-rc.1",
expectedSkippableErrs: 1,
},
}
for _, rt := range tests {
newK8sVer, err := version.ParseSemantic(rt.newK8sVersion)
if err != nil {
t.Fatalf("couldn't parse version %s: %v", rt.newK8sVersion, err)
}
actualSkewErrs := EnforceVersionPolicies(rt.vg, rt.newK8sVersion, newK8sVer, rt.allowExperimental, rt.allowRCs)
if actualSkewErrs == nil {
// No errors were seen. Report unit test failure if we expected to see errors
if rt.expectedMandatoryErrs+rt.expectedSkippableErrs > 0 {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected errors but got none")
}
// Otherwise, just move on with the next test
continue
}
if len(actualSkewErrs.Skippable) != rt.expectedSkippableErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected skippable errors: %d\n\tgot skippable errors: %d %v", rt.expectedSkippableErrs, len(actualSkewErrs.Skippable), *rt.vg)
}
if len(actualSkewErrs.Mandatory) != rt.expectedMandatoryErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected mandatory errors: %d\n\tgot mandatory errors: %d %v", rt.expectedMandatoryErrs, len(actualSkewErrs.Mandatory), *rt.vg)
}
}
}

View File

@ -0,0 +1,154 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"os"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
"k8s.io/kubernetes/pkg/util/version"
)
// PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
// Note that the mark-master phase is left out since it's not needed, and no new token is created as that doesn't belong to the upgrade
func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
errs := []error{}
// Upload currently used configuration to the cluster
// Note: This is done right in the beginning of cluster initialization; as we might want to make other phases
// depend on centralized information from this source in the future
if err := uploadconfig.UploadConfiguration(cfg, client); err != nil {
errs = append(errs, err)
}
// Create/update RBAC rules that make the bootstrap tokens able to post CSRs
if err := nodebootstraptoken.AllowBootstrapTokensToPostCSRs(client); err != nil {
errs = append(errs, err)
}
// Create/update RBAC rules that make the bootstrap tokens able to get their CSRs approved automatically
if err := nodebootstraptoken.AutoApproveNodeBootstrapTokens(client); err != nil {
errs = append(errs, err)
}
// Create/update RBAC rules that make 1.8.0+ nodes able to rotate certificates and get their CSRs approved automatically
if err := nodebootstraptoken.AutoApproveNodeCertificateRotation(client); err != nil {
errs = append(errs, err)
}
// Upgrade to a self-hosted control plane if possible
if err := upgradeToSelfHosting(client, cfg, newK8sVer, dryRun); err != nil {
errs = append(errs, err)
}
// TODO: Does this need to be done here? I think that updating cluster info should probably be separate from a normal upgrade
// Create the cluster-info ConfigMap with the associated RBAC rules
// if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
// return err
//}
// Create/update RBAC rules that make the cluster-info ConfigMap reachable
if err := clusterinfo.CreateClusterInfoRBACRules(client); err != nil {
errs = append(errs, err)
}
certAndKeyDir := kubeadmapiext.DefaultCertificatesDir
shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir, newK8sVer)
// Don't fail the upgrade phase if we can't determine whether to back up the kube-apiserver cert and key.
if err != nil {
fmt.Printf("[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key: %v", err)
} else if shouldBackup {
// Don't fail the upgrade phase if backing up the kube-apiserver cert and key fails.
if err := backupAPIServerCertAndKey(certAndKeyDir); err != nil {
fmt.Printf("[postupgrade] WARNING: failed to backup kube-apiserver cert and key: %v", err)
}
if err := certsphase.CreateAPIServerCertAndKeyFiles(cfg); err != nil {
errs = append(errs, err)
}
}
// Upgrade the DNS addon (kube-dns or CoreDNS) and kube-proxy
if err := dns.EnsureDNSAddon(cfg, client); err != nil {
errs = append(errs, err)
}
// Remove the old kube-dns deployment if CoreDNS is now used
if !dryRun {
if err := removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg, client); err != nil {
errs = append(errs, err)
}
}
if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
errs = append(errs, err)
}
return errors.NewAggregate(errs)
}
func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
return apiclient.TryRunCommand(func() error {
coreDNSDeployment, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
if err != nil {
return err
}
if coreDNSDeployment.Status.ReadyReplicas == 0 {
return fmt.Errorf("the CodeDNS deployment isn't ready yet")
}
return apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
}, 10)
}
return nil
}
func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
if features.Enabled(cfg.FeatureGates, features.SelfHosting) && !IsControlPlaneSelfHosted(client) && newK8sVer.AtLeast(v190alpha3) {
waiter := getWaiter(dryRun, client)
// kubeadm will now convert the static Pod-hosted control plane into a self-hosted one
fmt.Println("[self-hosted] Creating self-hosted control plane.")
if err := selfhosting.CreateSelfHostedControlPlane(kubeadmconstants.GetStaticPodDirectory(), kubeadmconstants.KubernetesDir, cfg, client, waiter, dryRun); err != nil {
return fmt.Errorf("error creating self hosted control plane: %v", err)
}
}
return nil
}
// getWaiter gets the right waiter implementation for the right occasion
// TODO: Consolidate this with what's in init.go?
func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
if dryRun {
return dryrunutil.NewWaiter()
}
return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
}
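An illustrative sketch (not from the original source) of invoking the post-upgrade tasks defined above; the client, cfg, and dryRun variables are assumed to come from the surrounding upgrade flow, and the target version is an example value.
	newK8sVer := version.MustParseSemantic("v1.9.0")
	if err := PerformPostUpgradeTasks(client, cfg, newK8sVer, dryRun); err != nil {
		return fmt.Errorf("[upgrade/postupgrade] FATAL post-upgrade error: %v", err)
	}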

View File

@ -0,0 +1,106 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version"
)
// TODO: Maybe move these constants elsewhere in future releases
var v190 = version.MustParseSemantic("v1.9.0")
var v190alpha3 = version.MustParseSemantic("v1.9.0-alpha.3")
var expiry = 180 * 24 * time.Hour
// backupAPIServerCertAndKey backs up the old cert and key of kube-apiserver to a specified directory.
func backupAPIServerCertAndKey(certAndKeyDir string) error {
subDir := filepath.Join(certAndKeyDir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
return fmt.Errorf("failed to created backup directory %s: %v", subDir, err)
}
filesToMove := map[string]string{
filepath.Join(certAndKeyDir, constants.APIServerCertName): filepath.Join(subDir, constants.APIServerCertName),
filepath.Join(certAndKeyDir, constants.APIServerKeyName): filepath.Join(subDir, constants.APIServerKeyName),
}
return moveFiles(filesToMove)
}
// moveFiles moves files from one directory to another.
func moveFiles(files map[string]string) error {
filesToRecover := map[string]string{}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
return rollbackFiles(filesToRecover, err)
}
filesToRecover[to] = from
}
return nil
}
// rollbackFiles moves the files back to the original directory.
func rollbackFiles(files map[string]string, originalErr error) error {
errs := []error{originalErr}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
errs = append(errs, err)
}
}
return fmt.Errorf("couldn't move these files: %v. Got errors: %v", files, errors.NewAggregate(errs))
}
// shouldBackupAPIServerCertAndKey checks whether the new k8s version is at least v1.9.0
// and the kube-apiserver certificate is more than 180 days old, in which case it should be backed up and regenerated.
func shouldBackupAPIServerCertAndKey(certAndKeyDir string, newK8sVer *version.Version) (bool, error) {
if newK8sVer.LessThan(v190) {
return false, nil
}
apiServerCert := filepath.Join(certAndKeyDir, constants.APIServerCertName)
data, err := ioutil.ReadFile(apiServerCert)
if err != nil {
return false, fmt.Errorf("failed to read kube-apiserver certificate from disk: %v", err)
}
block, _ := pem.Decode(data)
if block == nil {
return false, fmt.Errorf("expected the kube-apiserver certificate to be PEM encoded")
}
certs, err := x509.ParseCertificates(block.Bytes)
if err != nil {
return false, fmt.Errorf("unable to parse certificate data: %v", err)
}
if len(certs) == 0 {
return false, fmt.Errorf("no certificate data found")
}
if time.Now().Sub(certs[0].NotBefore) > expiry {
return true, nil
}
return false, nil
}
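An illustrative sketch (not from the original source) of the age comparison performed above, using a hypothetical certificate issued 200 days ago, which exceeds the 180-day expiry window and would therefore trigger a backup.
	issuedAt := time.Now().Add(-200 * 24 * time.Hour) // hypothetical NotBefore, 200 days in the past
	needsBackup := time.Now().Sub(issuedAt) > expiry  // true: 200 days > 180 days
	_ = needsBackup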

View File

@ -0,0 +1,192 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"errors"
"os"
"path/filepath"
"strings"
"testing"
"time"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
"k8s.io/kubernetes/pkg/util/version"
)
func TestBackupAPIServerCertAndKey(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
os.Chmod(tmpdir, 0766)
certPath := filepath.Join(tmpdir, constants.APIServerCertName)
certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create cert file %s: %v", certPath, err)
}
defer certFile.Close()
keyPath := filepath.Join(tmpdir, constants.APIServerKeyName)
keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create key file %s: %v", keyPath, err)
}
defer keyFile.Close()
if err := backupAPIServerCertAndKey(tmpdir); err != nil {
t.Fatalf("Failed to backup cert and key in dir %s: %v", tmpdir, err)
}
}
func TestMoveFiles(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
os.Chmod(tmpdir, 0766)
certPath := filepath.Join(tmpdir, constants.APIServerCertName)
certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create cert file %s: %v", certPath, err)
}
defer certFile.Close()
keyPath := filepath.Join(tmpdir, constants.APIServerKeyName)
keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create key file %s: %v", keyPath, err)
}
defer keyFile.Close()
subDir := filepath.Join(tmpdir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
t.Fatalf("Failed to create backup directory %s: %v", subDir, err)
}
filesToMove := map[string]string{
filepath.Join(tmpdir, constants.APIServerCertName): filepath.Join(subDir, constants.APIServerCertName),
filepath.Join(tmpdir, constants.APIServerKeyName): filepath.Join(subDir, constants.APIServerKeyName),
}
if err := moveFiles(filesToMove); err != nil {
t.Fatalf("Failed to move files %v: %v", filesToMove, err)
}
}
func TestRollbackFiles(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
os.Chmod(tmpdir, 0766)
subDir := filepath.Join(tmpdir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
t.Fatalf("Failed to create backup directory %s: %v", subDir, err)
}
certPath := filepath.Join(subDir, constants.APIServerCertName)
certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create cert file %s: %v", certPath, err)
}
defer certFile.Close()
keyPath := filepath.Join(subDir, constants.APIServerKeyName)
keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create key file %s: %v", keyPath, err)
}
defer keyFile.Close()
filesToRollBack := map[string]string{
filepath.Join(subDir, constants.APIServerCertName): filepath.Join(tmpdir, constants.APIServerCertName),
filepath.Join(subDir, constants.APIServerKeyName): filepath.Join(tmpdir, constants.APIServerKeyName),
}
errString := "there are files need roll back"
originalErr := errors.New(errString)
err = rollbackFiles(filesToRollBack, originalErr)
if err == nil {
t.Fatalf("Expected error contains %q, got nil", errString)
}
if !strings.Contains(err.Error(), errString) {
t.Fatalf("Expected error contains %q, got %v", errString, err)
}
}
func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "test-node",
}
caCert, caKey, err := certsphase.NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
for desc, test := range map[string]struct {
adjustedExpiry time.Duration
k8sVersion *version.Version
expected bool
}{
"1.8 version doesn't need to backup": {
k8sVersion: version.MustParseSemantic("v1.8.0"),
expected: false,
},
"1.9 version with cert not older than 180 days doesn't needs to backup": {
k8sVersion: version.MustParseSemantic("v1.9.0"),
expected: false,
},
"1.9 version with cert older than 180 days need to backup": {
adjustedExpiry: expiry + 100*time.Hour,
k8sVersion: version.MustParseSemantic("v1.9.0"),
expected: true,
},
} {
caCert.NotBefore = caCert.NotBefore.Add(-test.adjustedExpiry).UTC()
apiCert, apiKey, err := certsphase.NewAPIServerCertAndKey(cfg, caCert, caKey)
if err != nil {
t.Fatalf("Test %s: failed creation of cert and key: %v", desc, err)
}
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
if err := pkiutil.WriteCertAndKey(tmpdir, constants.APIServerCertAndKeyBaseName, apiCert, apiKey); err != nil {
t.Fatalf("Test %s: failure while saving %s certificate and key: %v", desc, constants.APIServerCertAndKeyBaseName, err)
}
certAndKey := []string{filepath.Join(tmpdir, constants.APIServerCertName), filepath.Join(tmpdir, constants.APIServerKeyName)}
for _, path := range certAndKey {
if _, err := os.Stat(path); os.IsNotExist(err) {
t.Fatalf("Test %s: %s not exist: %v", desc, path, err)
}
}
shouldBackup, err := shouldBackupAPIServerCertAndKey(tmpdir, test.k8sVersion)
if err != nil {
t.Fatalf("Test %s: failed to check shouldBackupAPIServerCertAndKey: %v", desc, err)
}
if shouldBackup != test.expected {
t.Fatalf("Test %s: shouldBackupAPIServerCertAndKey expected %v, got %v", desc, test.expected, shouldBackup)
}
}
}

View File

@ -0,0 +1,180 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"time"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
const (
prepullPrefix = "upgrade-prepull-"
)
// Prepuller defines an interface for performing a prepull operation in a create-wait-delete fashion in parallel
type Prepuller interface {
CreateFunc(string) error
WaitFunc(string)
DeleteFunc(string) error
}
// DaemonSetPrepuller makes sure the control plane images are available on all masters
type DaemonSetPrepuller struct {
client clientset.Interface
cfg *kubeadmapi.MasterConfiguration
waiter apiclient.Waiter
}
// NewDaemonSetPrepuller creates a new instance of the DaemonSetPrepuller struct
func NewDaemonSetPrepuller(client clientset.Interface, waiter apiclient.Waiter, cfg *kubeadmapi.MasterConfiguration) *DaemonSetPrepuller {
return &DaemonSetPrepuller{
client: client,
cfg: cfg,
waiter: waiter,
}
}
// CreateFunc creates a DaemonSet for making the image available on every relevant node
func (d *DaemonSetPrepuller) CreateFunc(component string) error {
image := images.GetCoreImage(component, d.cfg.GetControlPlaneImageRepository(), d.cfg.KubernetesVersion, d.cfg.UnifiedControlPlaneImage)
ds := buildPrePullDaemonSet(component, image)
// Create the DaemonSet in the API Server
if err := apiclient.CreateOrUpdateDaemonSet(d.client, ds); err != nil {
return fmt.Errorf("unable to create a DaemonSet for prepulling the component %q: %v", component, err)
}
return nil
}
// WaitFunc waits for all Pods in the specified DaemonSet to be in the Running state
func (d *DaemonSetPrepuller) WaitFunc(component string) {
fmt.Printf("[upgrade/prepull] Prepulling image for component %s.\n", component)
d.waiter.WaitForPodsWithLabel("k8s-app=upgrade-prepull-" + component)
}
// DeleteFunc deletes the DaemonSet used for making the image available on every relevant node
func (d *DaemonSetPrepuller) DeleteFunc(component string) error {
dsName := addPrepullPrefix(component)
if err := apiclient.DeleteDaemonSetForeground(d.client, metav1.NamespaceSystem, dsName); err != nil {
return fmt.Errorf("unable to cleanup the DaemonSet used for prepulling %s: %v", component, err)
}
fmt.Printf("[upgrade/prepull] Prepulled image for component %s.\n", component)
return nil
}
// PrepullImagesInParallel creates DaemonSets synchronously but waits in parallel for the images to pull
func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) error {
componentsToPrepull := constants.MasterComponents
fmt.Printf("[upgrade/prepull] Will prepull images for components %v\n", componentsToPrepull)
timeoutChan := time.After(timeout)
// Synchronously create the DaemonSets
for _, component := range componentsToPrepull {
if err := kubePrepuller.CreateFunc(component); err != nil {
return err
}
}
// Create a channel for streaming data from goroutines that run in parallel to a blocking for loop that cleans up
prePulledChan := make(chan string, len(componentsToPrepull))
for _, component := range componentsToPrepull {
go func(c string) {
// Wait as long as needed. This WaitFunc call should block until completion
kubePrepuller.WaitFunc(c)
// When the task is done, go ahead and cleanup by sending the name to the channel
prePulledChan <- c
}(component)
}
// This call blocks until all expected messages are received from the channel or errors out if timeoutChan fires.
// For every successful wait, kubePrepuller.DeleteFunc is executed
if err := waitForItemsFromChan(timeoutChan, prePulledChan, len(componentsToPrepull), kubePrepuller.DeleteFunc); err != nil {
return err
}
fmt.Println("[upgrade/prepull] Successfully prepulled the images for all the control plane components")
return nil
}
// waitForItemsFromChan waits for n elements from stringChan with a timeout. For every item received from stringChan, cleanupFunc is executed
func waitForItemsFromChan(timeoutChan <-chan time.Time, stringChan chan string, n int, cleanupFunc func(string) error) error {
i := 0
for {
select {
case <-timeoutChan:
return fmt.Errorf("The prepull operation timed out")
case result := <-stringChan:
i++
// If the cleanup function errors; error here as well
if err := cleanupFunc(result); err != nil {
return err
}
if i == n {
return nil
}
}
}
}
// addPrepullPrefix adds the prepull prefix for this functionality; can be used in names, labels, etc.
func addPrepullPrefix(component string) string {
return fmt.Sprintf("%s%s", prepullPrefix, component)
}
// buildPrePullDaemonSet builds the DaemonSet that ensures the control plane image is available
func buildPrePullDaemonSet(component, image string) *apps.DaemonSet {
var gracePeriodSecs int64
return &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: addPrepullPrefix(component),
Namespace: metav1.NamespaceSystem,
},
Spec: apps.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"k8s-app": addPrepullPrefix(component),
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: component,
Image: image,
Command: []string{"/bin/sleep", "3600"},
},
},
NodeSelector: map[string]string{
constants.LabelNodeRoleMaster: "",
},
Tolerations: []v1.Toleration{constants.MasterToleration},
TerminationGracePeriodSeconds: &gracePeriodSecs,
},
},
},
}
}
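An illustrative sketch (not from the original source) of wiring the DaemonSet-based prepuller into the parallel prepull defined above; the client, waiter, and cfg variables are assumed to come from the surrounding upgrade flow, and the 15-minute timeout is an example value.
	prepuller := NewDaemonSetPrepuller(client, waiter, cfg)
	if err := PrepullImagesInParallel(prepuller, 15*time.Minute); err != nil {
		return fmt.Errorf("[upgrade/prepull] FATAL pre-pull error: %v", err)
	}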

View File

@ -0,0 +1,145 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"testing"
"time"
//"k8s.io/kubernetes/pkg/util/version"
)
// failedCreatePrepuller is a fake prepuller that errors for kube-controller-manager in the CreateFunc call
type failedCreatePrepuller struct{}
func NewFailedCreatePrepuller() Prepuller {
return &failedCreatePrepuller{}
}
func (p *failedCreatePrepuller) CreateFunc(component string) error {
if component == "kube-controller-manager" {
return fmt.Errorf("boo")
}
return nil
}
func (p *failedCreatePrepuller) WaitFunc(component string) {}
func (p *failedCreatePrepuller) DeleteFunc(component string) error {
return nil
}
// foreverWaitPrepuller is a fake prepuller that basically waits "forever" (10 mins, but longer than the 10sec timeout)
type foreverWaitPrepuller struct{}
func NewForeverWaitPrepuller() Prepuller {
return &foreverWaitPrepuller{}
}
func (p *foreverWaitPrepuller) CreateFunc(component string) error {
return nil
}
func (p *foreverWaitPrepuller) WaitFunc(component string) {
time.Sleep(10 * time.Minute)
}
func (p *foreverWaitPrepuller) DeleteFunc(component string) error {
return nil
}
// failedDeletePrepuller is a fake prepuller that errors for kube-scheduler in the DeleteFunc call
type failedDeletePrepuller struct{}
func NewFailedDeletePrepuller() Prepuller {
return &failedDeletePrepuller{}
}
func (p *failedDeletePrepuller) CreateFunc(component string) error {
return nil
}
func (p *failedDeletePrepuller) WaitFunc(component string) {}
func (p *failedDeletePrepuller) DeleteFunc(component string) error {
if component == "kube-scheduler" {
return fmt.Errorf("boo")
}
return nil
}
// goodPrepuller is a fake prepuller that works as expected
type goodPrepuller struct{}
func NewGoodPrepuller() Prepuller {
return &goodPrepuller{}
}
func (p *goodPrepuller) CreateFunc(component string) error {
time.Sleep(300 * time.Millisecond)
return nil
}
func (p *goodPrepuller) WaitFunc(component string) {
time.Sleep(300 * time.Millisecond)
}
func (p *goodPrepuller) DeleteFunc(component string) error {
time.Sleep(300 * time.Millisecond)
return nil
}
func TestPrepullImagesInParallel(t *testing.T) {
tests := []struct {
p Prepuller
timeout time.Duration
expectedErr bool
}{
{ // should error out; create failed
p: NewFailedCreatePrepuller(),
timeout: 10 * time.Second,
expectedErr: true,
},
{ // should error out; timeout exceeded
p: NewForeverWaitPrepuller(),
timeout: 10 * time.Second,
expectedErr: true,
},
{ // should error out; delete failed
p: NewFailedDeletePrepuller(),
timeout: 10 * time.Second,
expectedErr: true,
},
{ // should work just fine
p: NewGoodPrepuller(),
timeout: 10 * time.Second,
expectedErr: false,
},
}
for _, rt := range tests {
actualErr := PrepullImagesInParallel(rt.p, rt.timeout)
if (actualErr != nil) != rt.expectedErr {
t.Errorf(
"failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t",
rt.expectedErr,
(actualErr != nil),
)
}
}
}

View File

@ -0,0 +1,272 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"time"
apps "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/util/version"
)
const (
// upgradeTempDSPrefix is the prefix added to the temporary DaemonSet's name used during the upgrade
upgradeTempDSPrefix = "temp-upgrade-"
// upgradeTempLabel is the label key used for identifying the temporary component's DaemonSet
upgradeTempLabel = "temp-upgrade-component"
// selfHostingWaitTimeout describes the maximum amount of time a self-hosting wait process should wait before timing out
selfHostingWaitTimeout = 2 * time.Minute
// selfHostingFailureThreshold describes how many times kubeadm will retry creating the DaemonSets
selfHostingFailureThreshold int = 10
)
// controlPlaneComponentResources holds the relevant Pod and DaemonSet associated with a control plane component
type controlPlaneComponentResources struct {
pod *v1.Pod
daemonSet *apps.DaemonSet
}
// SelfHostedControlPlane upgrades a self-hosted control plane
// It works as follows:
// - The client gets the currently running DaemonSets and their associated Pods used for self-hosting the control plane
// - A temporary DaemonSet for the component in question is created; it is nearly identical to the DaemonSet for the self-hosted component running right now
// - Why use this temporary DaemonSet? Because the RollingUpdate strategy for upgrading DaemonSets first kills the old Pod, and then adds the new one
// - This doesn't work for self-hosted upgrades: if you remove the only API server in the cluster, for instance, the cluster essentially goes down
// - So instead, a nearly identical copy of the pre-upgrade DaemonSet is created and applied to the cluster. In the beginning, this duplicate DS is just idle
// - kubeadm waits for the temporary DaemonSet's Pod to become Running
// - kubeadm updates the real, self-hosted component. This will result in the pre-upgrade component Pod being removed from the cluster
// - Luckily, the temporary, backup DaemonSet now kicks in, takes over and acts as the control plane. It recognizes that a new Pod should be created,
//   as the "real" DaemonSet is being updated.
// - kubeadm waits for the pre-upgrade Pod to be deleted. It now takes advantage of the backup/temporary component
// - kubeadm waits for the new, upgraded DaemonSet to become Running.
// - Now that the new, upgraded DaemonSet is Running, we can delete the backup/temporary DaemonSet
// - Lastly, make sure the API /healthz endpoint is still reachable
//
// TL;DR: This is what the flow looks like in pseudo-code:
// for [kube-apiserver, kube-controller-manager, kube-scheduler], do:
// 1. Self-Hosted component v1 Running
// -> Duplicate the DaemonSet manifest
// 2. Self-Hosted component v1 Running (active). Backup component v1 Running (passive)
// -> Upgrade the Self-Hosted component v1 to v2.
// -> Self-Hosted component v1 is Deleted from the cluster
// 3. Backup component v1 Running becomes active and completes the upgrade by creating the Self-Hosted component v2 Pod (passive)
// -> Wait for Self-Hosted component v2 to become Running
// 4. Backup component v1 Running (active). Self-Hosted component v2 Running (passive)
// -> Backup component v1 is Deleted
// 5. Wait for Self-Hosted component v2 Running to become active
// 6. Repeat for all control plane components
func SelfHostedControlPlane(client clientset.Interface, waiter apiclient.Waiter, cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) error {
// Adjust the timeout slightly to something self-hosting specific
waiter.SetTimeout(selfHostingWaitTimeout)
// This function returns a map of DaemonSet objects ready to post to the API server
newControlPlaneDaemonSets := BuildUpgradedDaemonSetsFromConfig(cfg, k8sVersion)
controlPlaneResources, err := getCurrentControlPlaneComponentResources(client)
if err != nil {
return err
}
for _, component := range constants.MasterComponents {
// Make a shallow copy of the current DaemonSet in order to create a new, temporary one
tempDS := *controlPlaneResources[component].daemonSet
// Mutate the temp daemonset a little to be suitable for this usage (change label selectors, etc)
mutateTempDaemonSet(&tempDS, component)
// Create or update the DaemonSet in the API Server, and retry selfHostingFailureThreshold times if it errors out
if err := apiclient.TryRunCommand(func() error {
return apiclient.CreateOrUpdateDaemonSet(client, &tempDS)
}, selfHostingFailureThreshold); err != nil {
return err
}
// Wait for the temporary/backup self-hosted component to come up
if err := waiter.WaitForPodsWithLabel(buildTempUpgradeDSLabelQuery(component)); err != nil {
return err
}
newDS := newControlPlaneDaemonSets[component]
// Upgrade the component's self-hosted resource
// During this upgrade, the temporary/backup component will take over
if err := apiclient.TryRunCommand(func() error {
if _, err := client.AppsV1beta2().DaemonSets(newDS.ObjectMeta.Namespace).Update(newDS); err != nil {
return fmt.Errorf("couldn't update self-hosted component's DaemonSet: %v", err)
}
return nil
}, selfHostingFailureThreshold); err != nil {
return err
}
// Wait for the component's old Pod to disappear
oldPod := controlPlaneResources[component].pod
if err := waiter.WaitForPodToDisappear(oldPod.ObjectMeta.Name); err != nil {
return err
}
// Wait for the main, upgraded self-hosted component to come up
// Here we're talking to the temporary/backup component; the upgraded component is in the process of starting up
if err := waiter.WaitForPodsWithLabel(selfhosting.BuildSelfHostedComponentLabelQuery(component)); err != nil {
return err
}
// Delete the temporary DaemonSet, and retry selfHostingFailureThreshold times if it errors out
// In order to pivot back to the upgraded API server, we kill the temporary/backup component
if err := apiclient.TryRunCommand(func() error {
return apiclient.DeleteDaemonSetForeground(client, tempDS.ObjectMeta.Namespace, tempDS.ObjectMeta.Name)
}, selfHostingFailureThreshold); err != nil {
return err
}
// Just as an extra safety check, make sure the API server is returning ok at the /healthz endpoint
if err := waiter.WaitForAPI(); err != nil {
return err
}
fmt.Printf("[upgrade/apply] Self-hosted component %q upgraded successfully!\n", component)
}
return nil
}
// BuildUpgradedDaemonSetsFromConfig takes a config object and the current version and returns the DaemonSet objects to post to the master
func BuildUpgradedDaemonSetsFromConfig(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) map[string]*apps.DaemonSet {
// Here the map of different mutators to use for the control plane's podspec is stored
mutators := selfhosting.GetMutatorsFromFeatureGates(cfg.FeatureGates)
// Get the new PodSpecs to use
controlPlanePods := controlplane.GetStaticPodSpecs(cfg, k8sVersion)
// Store the created DaemonSets in this map
controlPlaneDaemonSets := map[string]*apps.DaemonSet{}
for _, component := range constants.MasterComponents {
podSpec := controlPlanePods[component].Spec
// Build the full DaemonSet object from the PodSpec generated from the control plane phase and
// using the self-hosting mutators available from the selfhosting phase
ds := selfhosting.BuildDaemonSet(component, &podSpec, mutators)
controlPlaneDaemonSets[component] = ds
}
return controlPlaneDaemonSets
}
// addTempUpgradeDSPrefix adds the upgradeTempDSPrefix to the specified DaemonSet name
func addTempUpgradeDSPrefix(currentName string) string {
return fmt.Sprintf("%s%s", upgradeTempDSPrefix, currentName)
}
// buildTempUpgradeLabels returns the label string-string map for identifying the temporary DaemonSet
func buildTempUpgradeLabels(component string) map[string]string {
return map[string]string{
upgradeTempLabel: component,
}
}
// buildTempUpgradeDSLabelQuery creates the label selector query for matching the temporary DaemonSet's Pods
func buildTempUpgradeDSLabelQuery(component string) string {
return fmt.Sprintf("%s=%s", upgradeTempLabel, component)
}
// mutateTempDaemonSet mutates the specified self-hosted DaemonSet for the specified component
// in a way that makes it possible to post a nearly identical, temporary DaemonSet as a backup
func mutateTempDaemonSet(tempDS *apps.DaemonSet, component string) {
// Prefix the name of the temporary DaemonSet with upgradeTempDSPrefix
tempDS.ObjectMeta.Name = addTempUpgradeDSPrefix(tempDS.ObjectMeta.Name)
// Set .Labels to something other than what the "real" self-hosted components have
tempDS.ObjectMeta.Labels = buildTempUpgradeLabels(component)
tempDS.Spec.Selector.MatchLabels = buildTempUpgradeLabels(component)
tempDS.Spec.Template.ObjectMeta.Labels = buildTempUpgradeLabels(component)
// Clean all unnecessary ObjectMeta fields
tempDS.ObjectMeta = extractRelevantObjectMeta(tempDS.ObjectMeta)
// Reset .Status as we're posting a new object
tempDS.Status = apps.DaemonSetStatus{}
}
// extractRelevantObjectMeta returns only the relevant parts of ObjectMeta required when creating
// a new, identical resource. We should not POST the ResourceVersion, UID, etc.; only the name,
// namespace, labels and annotations should be preserved.
func extractRelevantObjectMeta(ob metav1.ObjectMeta) metav1.ObjectMeta {
return metav1.ObjectMeta{
Name: ob.Name,
Namespace: ob.Namespace,
Labels: ob.Labels,
Annotations: ob.Annotations,
}
}
// listPodsWithLabelSelector returns the relevant Pods for the given LabelSelector
func listPodsWithLabelSelector(client clientset.Interface, kvLabel string) (*v1.PodList, error) {
return client.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{
LabelSelector: kvLabel,
})
}
// getCurrentControlPlaneComponentResources returns a string-(Pod|DaemonSet) map for later use
func getCurrentControlPlaneComponentResources(client clientset.Interface) (map[string]controlPlaneComponentResources, error) {
controlPlaneResources := map[string]controlPlaneComponentResources{}
for _, component := range constants.MasterComponents {
var podList *v1.PodList
var currentDS *apps.DaemonSet
// Get the self-hosted pod associated with the component
podLabelSelector := selfhosting.BuildSelfHostedComponentLabelQuery(component)
if err := apiclient.TryRunCommand(func() error {
var tryrunerr error
podList, tryrunerr = listPodsWithLabelSelector(client, podLabelSelector)
return tryrunerr // note that tryrunerr is most likely nil here (in successful cases)
}, selfHostingFailureThreshold); err != nil {
return nil, err
}
// Make sure that there is only one Pod with this label selector; otherwise unexpected things can happen
if len(podList.Items) > 1 {
return nil, fmt.Errorf("too many pods with label selector %q found in the %s namespace", podLabelSelector, metav1.NamespaceSystem)
}
// Get the component's DaemonSet object
dsName := constants.AddSelfHostedPrefix(component)
if err := apiclient.TryRunCommand(func() error {
var tryrunerr error
// Try to get the current self-hosted component
currentDS, tryrunerr = client.AppsV1beta2().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
return tryrunerr // note that tryrunerr is most likely nil here (in successful cases)
}, selfHostingFailureThreshold); err != nil {
return nil, err
}
// Add the associated resources to the map to return later
controlPlaneResources[component] = controlPlaneComponentResources{
pod: &podList.Items[0],
daemonSet: currentDS,
}
}
return controlPlaneResources, nil
}
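// Illustrative, standalone sketch (not part of the original file): what the
// extractRelevantObjectMeta trimming above boils down to for a concrete object. Only the name,
// namespace, labels and annotations survive; server-populated fields such as ResourceVersion and
// UID are dropped so that the copy can be POSTed as a brand-new object. The concrete values are
// assumptions made for the example.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	original := metav1.ObjectMeta{
		Name:            "self-hosted-kube-apiserver",
		Namespace:       metav1.NamespaceSystem,
		Labels:          map[string]string{"k8s-app": "self-hosted-kube-apiserver"},
		Annotations:     map[string]string{"example": "true"},
		ResourceVersion: "12345",           // server-populated; must not be sent when creating a new object
		UID:             types.UID("abcd"), // server-populated; must not be sent when creating a new object
	}
	trimmed := metav1.ObjectMeta{
		Name:        original.Name,
		Namespace:   original.Namespace,
		Labels:      original.Labels,
		Annotations: original.Annotations,
	}
	fmt.Printf("safe to re-create: %+v\n", trimmed)
}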

View File

@ -0,0 +1,357 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"os"
"strings"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/util/version"
)
// StaticPodPathManager is responsible for tracking the directories used in the static pod upgrade transition
type StaticPodPathManager interface {
// MoveFile should move a file from oldPath to newPath
MoveFile(oldPath, newPath string) error
// RealManifestPath gets the file path for the component in the "real" static pod manifest directory used by the kubelet
RealManifestPath(component string) string
// RealManifestDir should point to the static pod manifest directory used by the kubelet
RealManifestDir() string
// TempManifestPath gets the file path for the component in the temporary directory created for generating new manifests for the upgrade
TempManifestPath(component string) string
// TempManifestDir should point to the temporary directory created for generating new manifests for the upgrade
TempManifestDir() string
// BackupManifestPath gets the file path for the component in the backup directory used for backing up manifests during the transition
BackupManifestPath(component string) string
// BackupManifestDir should point to the backup directory used for backing up manifests during the transition
BackupManifestDir() string
// BackupEtcdDir should point to the backup directory used for backing up the etcd data during the transition
BackupEtcdDir() string
}
// KubeStaticPodPathManager is a real implementation of StaticPodPathManager that is used when upgrading a static pod cluster
type KubeStaticPodPathManager struct {
realManifestDir string
tempManifestDir string
backupManifestDir string
backupEtcdDir string
}
// NewKubeStaticPodPathManager creates a new instance of KubeStaticPodPathManager
func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string) StaticPodPathManager {
return &KubeStaticPodPathManager{
realManifestDir: realDir,
tempManifestDir: tempDir,
backupManifestDir: backupDir,
backupEtcdDir: backupEtcdDir,
}
}
// NewKubeStaticPodPathManagerUsingTempDirs creates a new instance of KubeStaticPodPathManager with temporary directories backing it
func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string) (StaticPodPathManager, error) {
upgradedManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgraded-manifests")
if err != nil {
return nil, err
}
backupManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-backup-manifests")
if err != nil {
return nil, err
}
backupEtcdDir, err := constants.CreateTempDirForKubeadm("kubeadm-backup-etcd")
if err != nil {
return nil, err
}
return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir), nil
}
// MoveFile should move a file from oldPath to newPath
func (spm *KubeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
}
// RealManifestPath gets the file path for the component in the "real" static pod manifest directory used by the kubelet
func (spm *KubeStaticPodPathManager) RealManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.realManifestDir)
}
// RealManifestDir should point to the static pod manifest directory used by the kubelet
func (spm *KubeStaticPodPathManager) RealManifestDir() string {
return spm.realManifestDir
}
// TempManifestPath gets the file path for the component in the temporary directory created for generating new manifests for the upgrade
func (spm *KubeStaticPodPathManager) TempManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.tempManifestDir)
}
// TempManifestDir should point to the temporary directory created for generating new manifests for the upgrade
func (spm *KubeStaticPodPathManager) TempManifestDir() string {
return spm.tempManifestDir
}
// BackupManifestPath gets the file path for the component in the backup directory used for backing up manifests during the transition
func (spm *KubeStaticPodPathManager) BackupManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.backupManifestDir)
}
// BackupManifestDir should point to the backup directory used for backing up manifests during the transition
func (spm *KubeStaticPodPathManager) BackupManifestDir() string {
return spm.backupManifestDir
}
// BackupEtcdDir should point to the backup directory used for backing up the etcd data during the transition
func (spm *KubeStaticPodPathManager) BackupEtcdDir() string {
return spm.backupEtcdDir
}
func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, beforePodHash string, recoverManifests map[string]string) error {
// Special treatment is required for etcd: rollbackOldManifests should roll back the etcd
// manifest only when the component being upgraded is etcd
recoverEtcd := false
if component == constants.Etcd {
recoverEtcd = true
}
// The old manifest is here, in the real manifest directory (/etc/kubernetes/manifests/)
currentManifestPath := pathMgr.RealManifestPath(component)
// The new, upgraded manifest will be written here
newManifestPath := pathMgr.TempManifestPath(component)
// The old manifest will be moved here, into a subfolder of the temporary directory
// If a rollback is needed, these manifests will be put back to where they were initially
backupManifestPath := pathMgr.BackupManifestPath(component)
// Store the backup path in the recover list. If something goes wrong now, this component will be rolled back.
recoverManifests[component] = backupManifestPath
// Move the old manifest into the old-manifests directory
if err := pathMgr.MoveFile(currentManifestPath, backupManifestPath); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
// Move the new manifest into the manifests directory
if err := pathMgr.MoveFile(newManifestPath, currentManifestPath); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
fmt.Printf("[upgrade/staticpods] Moved new manifest to %q and backed up old manifest to %q\n", currentManifestPath, backupManifestPath)
fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component")
// Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy
// If we don't do this, there is a case where we remove the Static Pod manifest, the kubelet is slow to react, kubeadm checks the
// API endpoint of the OLD Static Pod component below and proceeds quickly enough, which might lead to unexpected results.
if err := waiter.WaitForStaticPodControlPlaneHashChange(cfg.NodeName, component, beforePodHash); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
// Wait for the static pod component to come up and register itself as a mirror pod
if err := waiter.WaitForPodsWithLabel("component=" + component); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
fmt.Printf("[upgrade/staticpods] Component %q upgraded successfully!\n", component)
return nil
}
// performEtcdStaticPodUpgrade performs the upgrade of etcd; it returns a bool indicating whether a failure is fatal, plus the actual error.
func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, recoverManifests map[string]string) (bool, error) {
// Only the local, static pod-hosted etcd can be upgraded; bail out if external etcd is configured
if len(cfg.Etcd.Endpoints) != 0 {
return false, fmt.Errorf("external etcd detected, won't try to change any etcd state")
}
// Check the health state of etcd before proceeding with the upgrade
etcdCluster := util.LocalEtcdCluster{}
etcdStatus, err := etcdCluster.GetEtcdClusterStatus()
if err != nil {
return true, fmt.Errorf("etcd cluster is not healthy: %v", err)
}
// Backing up etcd data store
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.DataDir
if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil {
return true, fmt.Errorf("fail to back up etcd data: %v", err)
}
// Check the currently used etcd version against the version from the constants; if they differ, upgrade
desiredEtcdVersion, err := constants.EtcdSupportedVersion(cfg.KubernetesVersion)
if err != nil {
return true, fmt.Errorf("failed to parse the desired etcd version(%s): %v", desiredEtcdVersion.String(), err)
}
currentEtcdVersion, err := version.ParseSemantic(etcdStatus.Version)
if err != nil {
return true, fmt.Errorf("failed to parse the current etcd version(%s): %v", currentEtcdVersion.String(), err)
}
// Compare the current etcd version with the desired one to catch downgrade and same-version conditions and bail out on those.
if desiredEtcdVersion.LessThan(currentEtcdVersion) {
return false, fmt.Errorf("the desired etcd version for this Kubernetes version %q is %q, but the current etcd version is %q. Won't downgrade etcd, instead just continue", cfg.KubernetesVersion, desiredEtcdVersion.String(), currentEtcdVersion.String())
}
// Nothing to do when the desired etcd version is the same as the current one
if strings.Compare(desiredEtcdVersion.String(), currentEtcdVersion.String()) == 0 {
return false, nil
}
beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeName, constants.Etcd)
if err != nil {
return true, fmt.Errorf("fail to get etcd pod's hash: %v", err)
}
// Write the updated etcd static Pod manifest into the temporary directory; at this point no etcd change
// has occurred yet in any respect.
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.TempManifestDir(), cfg); err != nil {
return true, fmt.Errorf("error creating local etcd static pod manifest file: %v", err)
}
// Perform the etcd upgrade using the function that is common to all control plane components
if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests); err != nil {
// Since the etcd upgrade failed, the old manifest has been restored;
// now we need to check the health of the etcd cluster to see whether it came back up with the old manifest
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
// At this point we know that the etcd cluster is dead and it is safe to copy the backup datastore back and to roll back the old etcd manifest
if err := rollbackEtcdData(cfg, fmt.Errorf("etcd cluster is not healthy after upgrade: %v rolling back", err), pathMgr); err != nil {
// Even copying back datastore failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Old datastore has been copied, rolling back old manifests
if err := rollbackOldManifests(recoverManifests, err, pathMgr, true); err != nil {
// Rolling back to old manifests failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Since rollback of the old etcd manifest was successful, checking again the status of etcd cluster
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
// Nothing else left to try to recover etcd cluster
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, rolled the state back to pre-upgrade state", err)
}
// The etcd cluster came back up with the old manifest, so the rollback succeeded but the upgrade itself did not
return true, fmt.Errorf("fatal error when trying to upgrade the etcd cluster: %v, rolled the state back to pre-upgrade state", err)
}
// Checking health state of etcd after the upgrade
if _, err = etcdCluster.GetEtcdClusterStatus(); err != nil {
// Despite the fact that upgradeComponent was successful, there is something wrong with the etcd cluster
// The first step is to restore the backup of the datastore
if err := rollbackEtcdData(cfg, fmt.Errorf("etcd cluster is not healthy after upgrade: %v rolling back", err), pathMgr); err != nil {
// Even copying back datastore failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Old datastore has been copied, rolling back old manifests
if err := rollbackOldManifests(recoverManifests, err, pathMgr, true); err != nil {
// Rolling back to old manifests failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Since rollback of the old etcd manifest was successful, checking again the status of etcd cluster
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
// Nothing else left to try to recover etcd cluster
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, rolled the state back to pre-upgrade state", err)
}
return false, nil
}
// StaticPodControlPlane upgrades a static pod-hosted control plane
func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, etcdUpgrade bool) error {
recoverManifests := map[string]string{}
// etcd upgrade is done prior to other control plane components
if etcdUpgrade {
// Perform the etcd upgrade using the function that is common to all control plane components
fatal, err := performEtcdStaticPodUpgrade(waiter, pathMgr, cfg, recoverManifests)
if err != nil {
if fatal {
return err
}
fmt.Printf("[upgrade/etcd] non fatal issue encountered during upgrade: %v\n", err)
}
}
beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeName)
if err != nil {
return err
}
// Write the updated static Pod manifests into the temporary directory
fmt.Printf("[upgrade/staticpods] Writing new Static Pod manifests to %q\n", pathMgr.TempManifestDir())
err = controlplane.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg)
if err != nil {
return fmt.Errorf("error creating init static pod manifest files: %v", err)
}
for _, component := range constants.MasterComponents {
if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests); err != nil {
return err
}
}
// Remove the temporary directories used, on a best-effort basis (don't fail if the calls error out)
// The calls are set here by design; we should _not_ use "defer" above as that would remove the directories
// even in the "fail and rollback" case, where we want the directories preserved for the user.
os.RemoveAll(pathMgr.TempManifestDir())
os.RemoveAll(pathMgr.BackupManifestDir())
os.RemoveAll(pathMgr.BackupEtcdDir())
return nil
}
// rollbackOldManifests rolls back the backed-up manifests if something went wrong
func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr StaticPodPathManager, restoreEtcd bool) error {
errs := []error{origErr}
for component, backupPath := range oldManifests {
// Restore the etcd manifest only if it was explicitly requested by setting restoreEtcd to true
if component == constants.Etcd && !restoreEtcd {
continue
}
// Where we should put back the backed up manifest
realManifestPath := pathMgr.RealManifestPath(component)
// Move the backup manifest back into the manifests directory
err := pathMgr.MoveFile(backupPath, realManifestPath)
if err != nil {
errs = append(errs, err)
}
}
// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs)
}
// rollbackEtcdData rolls back the content of the etcd folder if something went wrong
func rollbackEtcdData(cfg *kubeadmapi.MasterConfiguration, origErr error, pathMgr StaticPodPathManager) error {
errs := []error{origErr}
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.DataDir
err := util.CopyDir(backupEtcdDir, runningEtcdDir)
if err != nil {
errs = append(errs, err)
}
// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't recover etcd database with error: %v, the location of etcd backup: %s ", errs, backupEtcdDir)
}
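// Illustrative, standalone sketch (not part of the original file): the core
// back-up/move-in/roll-back dance that upgradeComponent above performs on real static Pod
// manifests, reduced to plain files in temporary directories. The file names are assumptions made
// for the example; only the ordering of the moves mirrors the code above.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	realDir, _ := ioutil.TempDir("", "real-manifests")
	tempDir, _ := ioutil.TempDir("", "upgraded-manifests")
	backupDir, _ := ioutil.TempDir("", "backup-manifests")
	defer os.RemoveAll(realDir)
	defer os.RemoveAll(tempDir)
	defer os.RemoveAll(backupDir)

	current := filepath.Join(realDir, "kube-apiserver.yaml")
	upgraded := filepath.Join(tempDir, "kube-apiserver.yaml")
	backup := filepath.Join(backupDir, "kube-apiserver.yaml")
	ioutil.WriteFile(current, []byte("old manifest"), 0600)
	ioutil.WriteFile(upgraded, []byte("new manifest"), 0600)

	// 1. Move the currently served manifest aside into the backup directory.
	if err := os.Rename(current, backup); err != nil {
		fmt.Println("backup failed:", err)
		return
	}
	// 2. Move the upgraded manifest into place; on failure, restore the backup (the rollback path).
	if err := os.Rename(upgraded, current); err != nil {
		_ = os.Rename(backup, current)
		fmt.Println("upgrade failed, rolled back:", err)
		return
	}
	content, _ := ioutil.ReadFile(current)
	fmt.Printf("the kubelet would now see: %s\n", content)
}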

View File

@ -0,0 +1,376 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"crypto/sha256"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"time"
"k8s.io/apimachinery/pkg/runtime"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
const (
waitForHashes = "wait-for-hashes"
waitForHashChange = "wait-for-hash-change"
waitForPodsWithLabel = "wait-for-pods-with-label"
testConfiguration = `
api:
advertiseAddress: 1.2.3.4
bindPort: 6443
apiServerCertSANs: null
apiServerExtraArgs: null
authorizationModes:
- Node
- RBAC
certificatesDir: /etc/kubernetes/pki
cloudProvider: ""
controllerManagerExtraArgs: null
etcd:
caFile: ""
certFile: ""
dataDir: /var/lib/etcd
endpoints: null
extraArgs: null
image: ""
keyFile: ""
featureFlags: null
imageRepository: gcr.io/google_containers
kubernetesVersion: %s
networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
nodeName: thegopher
schedulerExtraArgs: null
token: ce3aa5.5ec8455bb76b379f
tokenTTL: 24h
unifiedControlPlaneImage: ""
`
)
// fakeWaiter is a fake apiclient.Waiter that returns errors it was initialized with
type fakeWaiter struct {
errsToReturn map[string]error
}
func NewFakeStaticPodWaiter(errsToReturn map[string]error) apiclient.Waiter {
return &fakeWaiter{
errsToReturn: errsToReturn,
}
}
// WaitForAPI just returns a dummy nil, to indicate that the program should just proceed
func (w *fakeWaiter) WaitForAPI() error {
return nil
}
// WaitForPodsWithLabel just returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForPodsWithLabel(kvLabel string) error {
return w.errsToReturn[waitForPodsWithLabel]
}
// WaitForPodToDisappear just returns a dummy nil, to indicate that the program should just proceed
func (w *fakeWaiter) WaitForPodToDisappear(podName string) error {
return nil
}
// SetTimeout is a no-op; we don't use it in this implementation
func (w *fakeWaiter) SetTimeout(_ time.Duration) {}
// WaitForStaticPodControlPlaneHashes returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodControlPlaneHashes(_ string) (map[string]string, error) {
return map[string]string{}, w.errsToReturn[waitForHashes]
}
// WaitForStaticPodSingleHash returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodSingleHash(_ string, _ string) (string, error) {
return "", w.errsToReturn[waitForHashes]
}
// WaitForStaticPodControlPlaneHashChange returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodControlPlaneHashChange(_, _, _ string) error {
return w.errsToReturn[waitForHashChange]
}
// WaitForHealthyKubelet returns a dummy nil just to implement the interface
func (w *fakeWaiter) WaitForHealthyKubelet(_ time.Duration, _ string) error {
return nil
}
type fakeStaticPodPathManager struct {
realManifestDir string
tempManifestDir string
backupManifestDir string
backupEtcdDir string
MoveFileFunc func(string, string) error
}
func NewFakeStaticPodPathManager(moveFileFunc func(string, string) error) (StaticPodPathManager, error) {
realManifestsDir, err := ioutil.TempDir("", "kubeadm-upgraded-manifests")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
}
upgradedManifestsDir, err := ioutil.TempDir("", "kubeadm-upgraded-manifests")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
}
backupManifestsDir, err := ioutil.TempDir("", "kubeadm-backup-manifests")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
}
backupEtcdDir, err := ioutil.TempDir("", "kubeadm-backup-etcd")
if err != nil {
return nil, err
}
return &fakeStaticPodPathManager{
realManifestDir: realManifestsDir,
tempManifestDir: upgradedManifestsDir,
backupManifestDir: backupManifestsDir,
backupEtcdDir: backupEtcdDir,
MoveFileFunc: moveFileFunc,
}, nil
}
func (spm *fakeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
return spm.MoveFileFunc(oldPath, newPath)
}
func (spm *fakeStaticPodPathManager) RealManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.realManifestDir)
}
func (spm *fakeStaticPodPathManager) RealManifestDir() string {
return spm.realManifestDir
}
func (spm *fakeStaticPodPathManager) TempManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.tempManifestDir)
}
func (spm *fakeStaticPodPathManager) TempManifestDir() string {
return spm.tempManifestDir
}
func (spm *fakeStaticPodPathManager) BackupManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.backupManifestDir)
}
func (spm *fakeStaticPodPathManager) BackupManifestDir() string {
return spm.backupManifestDir
}
func (spm *fakeStaticPodPathManager) BackupEtcdDir() string {
return spm.backupEtcdDir
}
func TestStaticPodControlPlane(t *testing.T) {
tests := []struct {
waitErrsToReturn map[string]error
moveFileFunc func(string, string) error
expectedErr bool
manifestShouldChange bool
}{
{ // error-free case should succeed
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: false,
manifestShouldChange: true,
},
{ // any wait error should result in a rollback and an abort
waitErrsToReturn: map[string]error{
waitForHashes: fmt.Errorf("boo! failed"),
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{ // any wait error should result in a rollback and an abort
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: fmt.Errorf("boo! failed"),
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{ // any wait error should result in a rollback and an abort
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: fmt.Errorf("boo! failed"),
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{ // any path-moving error should result in a rollback and an abort
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
// fail for kube-apiserver move
if strings.Contains(newPath, "kube-apiserver") {
return fmt.Errorf("moving the kube-apiserver file failed")
}
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{ // any path-moving error should result in a rollback and an abort
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
// fail for kube-controller-manager move
if strings.Contains(newPath, "kube-controller-manager") {
return fmt.Errorf("moving the kube-apiserver file failed")
}
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{ // any path-moving error should result in a rollback and an abort; even though this is the last component (kube-apiserver and kube-controller-manager healthy)
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
// fail for kube-scheduler move
if strings.Contains(newPath, "kube-scheduler") {
return fmt.Errorf("moving the kube-apiserver file failed")
}
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
}
for _, rt := range tests {
waiter := NewFakeStaticPodWaiter(rt.waitErrsToReturn)
pathMgr, err := NewFakeStaticPodPathManager(rt.moveFileFunc)
if err != nil {
t.Fatalf("couldn't run NewFakeStaticPodPathManager: %v", err)
}
defer os.RemoveAll(pathMgr.RealManifestDir())
defer os.RemoveAll(pathMgr.TempManifestDir())
defer os.RemoveAll(pathMgr.BackupManifestDir())
oldcfg, err := getConfig("v1.7.0")
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}
// Initialize the directory with v1.7 manifests; should then be upgraded to v1.8 using the method
err = controlplane.CreateInitStaticPodManifestFiles(pathMgr.RealManifestDir(), oldcfg)
if err != nil {
t.Fatalf("couldn't run CreateInitStaticPodManifestFiles: %v", err)
}
err = etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.RealManifestDir(), oldcfg)
if err != nil {
t.Fatalf("couldn't run CreateLocalEtcdStaticPodManifestFile: %v", err)
}
// Get a hash of the v1.7 API server manifest to compare later (was the file re-written)
oldHash, err := getAPIServerHash(pathMgr.RealManifestDir())
if err != nil {
t.Fatalf("couldn't read temp file: %v", err)
}
newcfg, err := getConfig("v1.8.0")
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}
actualErr := StaticPodControlPlane(waiter, pathMgr, newcfg, false)
if (actualErr != nil) != rt.expectedErr {
t.Errorf(
"failed UpgradeStaticPodControlPlane\n\texpected error: %t\n\tgot: %t",
rt.expectedErr,
(actualErr != nil),
)
}
newHash, err := getAPIServerHash(pathMgr.RealManifestDir())
if err != nil {
t.Fatalf("couldn't read temp file: %v", err)
}
if (oldHash != newHash) != rt.manifestShouldChange {
t.Errorf(
"failed StaticPodControlPlane\n\texpected manifest change: %t\n\tgot: %t",
rt.manifestShouldChange,
(oldHash != newHash),
)
}
}
}
func getAPIServerHash(dir string) (string, error) {
manifestPath := constants.GetStaticPodFilepath(constants.KubeAPIServer, dir)
fileBytes, err := ioutil.ReadFile(manifestPath)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", sha256.Sum256(fileBytes)), nil
}
func getConfig(version string) (*kubeadmapi.MasterConfiguration, error) {
externalcfg := &kubeadmapiext.MasterConfiguration{}
internalcfg := &kubeadmapi.MasterConfiguration{}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(fmt.Sprintf(testConfiguration, version)), externalcfg); err != nil {
return nil, fmt.Errorf("unable to decode config: %v", err)
}
legacyscheme.Scheme.Convert(externalcfg, internalcfg, nil)
return internalcfg, nil
}

View File

@ -0,0 +1,124 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"io"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
versionutil "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/pkg/version"
)
// VersionGetter defines an interface for fetching different versions.
// It is easy to implement a fake variant of this interface for unit testing
type VersionGetter interface {
// ClusterVersion should return the version of the cluster i.e. the API Server version
ClusterVersion() (string, *versionutil.Version, error)
// KubeadmVersion should return the version of the kubeadm CLI
KubeadmVersion() (string, *versionutil.Version, error)
// VersionFromCILabel should resolve CI labels like `latest`, `stable`, `stable-1.8`, etc. to real versions
VersionFromCILabel(string, string) (string, *versionutil.Version, error)
// KubeletVersions should return a map from kubelet version to the number of kubelets running that version
KubeletVersions() (map[string]uint16, error)
}
// KubeVersionGetter handles the version-fetching mechanism from external sources
type KubeVersionGetter struct {
client clientset.Interface
w io.Writer
}
// NewKubeVersionGetter returns a new instance of KubeVersionGetter
func NewKubeVersionGetter(client clientset.Interface, writer io.Writer) VersionGetter {
return &KubeVersionGetter{
client: client,
w: writer,
}
}
// ClusterVersion gets API server version
func (g *KubeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
clusterVersionInfo, err := g.client.Discovery().ServerVersion()
if err != nil {
return "", nil, fmt.Errorf("Couldn't fetch cluster version from the API Server: %v", err)
}
fmt.Fprintf(g.w, "[upgrade/versions] Cluster version: %s\n", clusterVersionInfo.String())
clusterVersion, err := versionutil.ParseSemantic(clusterVersionInfo.String())
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse cluster version: %v", err)
}
return clusterVersionInfo.String(), clusterVersion, nil
}
// KubeadmVersion gets kubeadm version
func (g *KubeVersionGetter) KubeadmVersion() (string, *versionutil.Version, error) {
kubeadmVersionInfo := version.Get()
fmt.Fprintf(g.w, "[upgrade/versions] kubeadm version: %s\n", kubeadmVersionInfo.String())
kubeadmVersion, err := versionutil.ParseSemantic(kubeadmVersionInfo.String())
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse kubeadm version: %v", err)
}
return kubeadmVersionInfo.String(), kubeadmVersion, nil
}
// VersionFromCILabel resolves a version label like "latest" or "stable" to an actual version using the public Kubernetes CI uploads
func (g *KubeVersionGetter) VersionFromCILabel(ciVersionLabel, description string) (string, *versionutil.Version, error) {
versionStr, err := kubeadmutil.KubernetesReleaseVersion(ciVersionLabel)
if err != nil {
return "", nil, fmt.Errorf("Couldn't fetch latest %s from the internet: %v", description, err)
}
if description != "" {
fmt.Fprintf(g.w, "[upgrade/versions] Latest %s: %s\n", description, versionStr)
}
ver, err := versionutil.ParseSemantic(versionStr)
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse latest %s: %v", description, err)
}
return versionStr, ver, nil
}
// KubeletVersions gets the versions of the kubelets in the cluster
func (g *KubeVersionGetter) KubeletVersions() (map[string]uint16, error) {
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("couldn't list all nodes in cluster")
}
return computeKubeletVersions(nodes.Items), nil
}
// computeKubeletVersions returns a string-int map that describes how many nodes are of a specific version
func computeKubeletVersions(nodes []v1.Node) map[string]uint16 {
kubeletVersions := map[string]uint16{}
for _, node := range nodes {
kver := node.Status.NodeInfo.KubeletVersion
if _, found := kubeletVersions[kver]; !found {
kubeletVersions[kver] = 1
continue
}
kubeletVersions[kver]++
}
return kubeletVersions
}
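// Illustrative, standalone sketch (not part of the original file): a minimal fake VersionGetter
// of the kind the interface comment above alludes to, useful for unit-testing upgrade logic
// without a live cluster. The canned version numbers are assumptions made for the example.
package main

import (
	"fmt"

	versionutil "k8s.io/kubernetes/pkg/util/version"
)

// fakeVersionGetter returns canned versions instead of querying the API server, the kubeadm
// binary or the CI release bucket.
type fakeVersionGetter struct {
	clusterVersion, kubeadmVersion, stableVersion string
}

func (f *fakeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
	return f.clusterVersion, versionutil.MustParseSemantic(f.clusterVersion), nil
}
func (f *fakeVersionGetter) KubeadmVersion() (string, *versionutil.Version, error) {
	return f.kubeadmVersion, versionutil.MustParseSemantic(f.kubeadmVersion), nil
}
func (f *fakeVersionGetter) VersionFromCILabel(_, _ string) (string, *versionutil.Version, error) {
	return f.stableVersion, versionutil.MustParseSemantic(f.stableVersion), nil
}
func (f *fakeVersionGetter) KubeletVersions() (map[string]uint16, error) {
	return map[string]uint16{f.clusterVersion: 1}, nil
}

func main() {
	g := &fakeVersionGetter{
		clusterVersion: "1.7.3",
		kubeadmVersion: "1.8.0",
		stableVersion:  "1.8.0",
	}
	_, cluster, _ := g.ClusterVersion()
	_, stable, _ := g.VersionFromCILabel("stable", "stable version")
	fmt.Printf("an upgrade is available: %t\n", cluster.LessThan(stable))
}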

View File

@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["uploadconfig.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["uploadconfig_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig",
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)

View File

@ -0,0 +1,60 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uploadconfig
import (
"fmt"
"github.com/ghodss/yaml"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// UploadConfiguration saves the MasterConfiguration used for later reference (when upgrading for instance)
func UploadConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
fmt.Printf("[uploadconfig] Storing the configuration used in ConfigMap %q in the %q Namespace\n", kubeadmconstants.MasterConfigurationConfigMap, metav1.NamespaceSystem)
// Convert cfg to the external version as that's the only version of the API that can be deserialized later
externalcfg := &kubeadmapiext.MasterConfiguration{}
legacyscheme.Scheme.Convert(cfg, externalcfg, nil)
// Removes sensitive info from the data that will be stored in the config map
externalcfg.Token = ""
cfgYaml, err := yaml.Marshal(*externalcfg)
if err != nil {
return err
}
return apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.MasterConfigurationConfigMap,
Namespace: metav1.NamespaceSystem,
},
Data: map[string]string{
kubeadmconstants.MasterConfigurationConfigMapKey: string(cfgYaml),
},
})
}
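// Illustrative, standalone sketch (not part of the original file): the read-back path hinted at
// in the UploadConfiguration comment above: fetching the stored ConfigMap again and decoding it
// into an internal MasterConfiguration, roughly the way an upgrade would consume it. The helper
// name fetchStoredConfiguration is an assumption made for the example; the API calls mirror the
// ones used in the test file that follows.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientset "k8s.io/client-go/kubernetes"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
)

// fetchStoredConfiguration is a hypothetical helper that reverses UploadConfiguration.
func fetchStoredConfiguration(client clientset.Interface) (*kubeadmapi.MasterConfiguration, error) {
	cm, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.MasterConfigurationConfigMap, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("couldn't fetch the stored configuration: %v", err)
	}
	configData := cm.Data[kubeadmconstants.MasterConfigurationConfigMapKey]
	externalcfg := &kubeadmapiext.MasterConfiguration{}
	if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(configData), externalcfg); err != nil {
		return nil, fmt.Errorf("unable to decode the stored configuration: %v", err)
	}
	// Default and convert to the internal version, just like the test in this package does.
	legacyscheme.Scheme.Default(externalcfg)
	internalcfg := &kubeadmapi.MasterConfiguration{}
	if err := legacyscheme.Scheme.Convert(externalcfg, internalcfg, nil); err != nil {
		return nil, err
	}
	return internalcfg, nil
}

func main() {
	client := clientsetfake.NewSimpleClientset()
	cfg := &kubeadmapi.MasterConfiguration{KubernetesVersion: "v1.8.0", Token: "abcdef.0123456789abcdef"}
	if err := uploadconfig.UploadConfiguration(cfg, client); err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	stored, err := fetchStoredConfiguration(client)
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Printf("stored KubernetesVersion: %s, token scrubbed: %t\n", stored.KubernetesVersion, stored.Token == "")
}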

View File

@ -0,0 +1,119 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uploadconfig
import (
"testing"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientsetfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
func TestUploadConfiguration(t *testing.T) {
tests := []struct {
name string
errOnCreate error
errOnUpdate error
updateExisting bool
errExpected bool
verifyResult bool
}{
{
name: "basic validation with correct key",
verifyResult: true,
},
{
name: "update existing should report no error",
updateExisting: true,
verifyResult: true,
},
{
name: "unexpected errors for create should be returned",
errOnCreate: apierrors.NewUnauthorized(""),
errExpected: true,
},
{
name: "update existing should report an error if an unexpected error for update is returned",
errOnUpdate: apierrors.NewUnauthorized(""),
updateExisting: true,
errExpected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "1.7.3",
Token: "1234567",
}
client := clientsetfake.NewSimpleClientset()
if tt.errOnCreate != nil {
client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tt.errOnCreate
})
}
// For the idempotency (updateExisting) test, we check the result of the second call below.
if err := UploadConfiguration(cfg, client); !tt.updateExisting && (err != nil) != tt.errExpected {
t.Errorf("UploadConfiguration() error = %v, wantErr %v", err, tt.errExpected)
}
if tt.updateExisting {
if tt.errOnUpdate != nil {
client.PrependReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tt.errOnUpdate
})
}
if err := UploadConfiguration(cfg, client); (err != nil) != tt.errExpected {
t.Errorf("UploadConfiguration() error = %v", err)
}
}
if tt.verifyResult {
masterCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.MasterConfigurationConfigMap, metav1.GetOptions{})
if err != nil {
t.Errorf("Fail to query ConfigMap error = %v", err)
}
configData := masterCfg.Data[kubeadmconstants.MasterConfigurationConfigMapKey]
if configData == "" {
t.Errorf("Fail to find ConfigMap key")
}
decodedExtCfg := &kubeadmapiext.MasterConfiguration{}
decodedCfg := &kubeadmapi.MasterConfiguration{}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(configData), decodedExtCfg); err != nil {
t.Errorf("unable to decode config from bytes: %v", err)
}
// Default and convert to the internal version
legacyscheme.Scheme.Default(decodedExtCfg)
legacyscheme.Scheme.Convert(decodedExtCfg, decodedCfg, nil)
if decodedCfg.KubernetesVersion != cfg.KubernetesVersion {
t.Errorf("Decoded value doesn't match, decoded = %#v, expected = %#v", decodedCfg.KubernetesVersion, cfg.KubernetesVersion)
}
if decodedCfg.Token != "" {
t.Errorf("Decoded value contains token (sensitive info), decoded = %#v, expected = empty", decodedCfg.Token)
}
}
})
}
}