vendor updates

Author: Serguei Bezverkhi
Date:   2018-03-06 17:33:18 -05:00
Commit: e9033989a0 (parent 4b3ebc171b)
5854 changed files with 248382 additions and 119809 deletions

@ -12,8 +12,7 @@ go_test(
"dns_test.go",
"versions_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
@ -42,7 +41,7 @@ go_library(
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",

@ -20,7 +20,7 @@ import (
"fmt"
"runtime"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -75,7 +75,7 @@ func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interfac
// Get the YAML manifest conditionally based on the k8s version
kubeDNSDeploymentBytes := GetKubeDNSManifest(k8sVersion)
dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(kubeDNSDeploymentBytes,
struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, DNSProbeType, MasterTaintKey string }{
struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
ImageRepository: cfg.ImageRepository,
Arch: runtime.GOARCH,
// Get the kube-dns version conditionally based on the k8s version
@ -83,7 +83,6 @@ func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interfac
DNSBindAddr: dnsBindAddr,
DNSProbeAddr: dnsProbeAddr,
DNSDomain: cfg.Networking.DNSDomain,
DNSProbeType: GetKubeDNSProbeType(k8sVersion),
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
})
if err != nil {
@ -203,7 +202,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien
coreDNSServiceAccount := &v1.ServiceAccount{}
if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSServiceAccount), coreDNSServiceAccount); err != nil {
return fmt.Errorf("unable to decode CoreDNS configmap %v", err)
return fmt.Errorf("unable to decode CoreDNS serviceaccount %v", err)
}
// Create the ConfigMap for CoreDNS or update it in case it already exists
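
The struct passed to kubeadmutil.ParseTemplate above loses its DNSProbeType field, and the matching `{{ .DNSProbeType }}` references in the kube-dns manifest (later in this commit) become the literal `SRV`. The two have to move together: Go's text/template fails at execution time if a template references a field the data struct does not carry. A minimal standard-library sketch of that constraint (not the kubeadm helper itself):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Reduced stand-in for one line of the kube-dns sidecar args: the probe type is
// now the literal "SRV", so the data struct no longer needs a DNSProbeType field.
const probeArg = `- --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,SRV`

func main() {
	tmpl := template.Must(template.New("probe").Parse(probeArg))
	data := struct{ DNSProbeAddr, DNSDomain string }{"127.0.0.1", "cluster.local"}

	var out bytes.Buffer
	// Referencing a field the struct does not have (e.g. {{ .DNSProbeType }})
	// would make Execute return an error here.
	if err := tmpl.Execute(&out, data); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}
```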

@ -92,14 +92,13 @@ func TestCompileManifests(t *testing.T) {
}{
{
manifest: v180AndAboveKubeDNSDeployment,
data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, DNSProbeType, MasterTaintKey string }{
data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
ImageRepository: "foo",
Arch: "foo",
Version: "foo",
DNSBindAddr: "foo",
DNSProbeAddr: "foo",
DNSDomain: "foo",
DNSProbeType: "foo",
MasterTaintKey: "foo",
},
expected: true,

@ -19,7 +19,7 @@ package dns
const (
// v180AndAboveKubeDNSDeployment is the kube-dns Deployment manifest for the kube-dns manifest for v1.7+
v180AndAboveKubeDNSDeployment = `
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns
@ -156,8 +156,8 @@ spec:
args:
- --v=2
- --logtostderr
- --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,{{ .DNSProbeType }}
- --probe=dnsmasq,{{ .DNSProbeAddr }}:53,kubernetes.default.svc.{{ .DNSDomain }},5,{{ .DNSProbeType }}
- --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,SRV
- --probe=dnsmasq,{{ .DNSProbeAddr }}:53,kubernetes.default.svc.{{ .DNSDomain }},5,SRV
ports:
- containerPort: 10054
name: metrics
@ -216,7 +216,7 @@ spec:
// CoreDNSDeployment is the CoreDNS Deployment manifest
CoreDNSDeployment = `
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
@ -224,7 +224,11 @@ metadata:
labels:
k8s-app: kube-dns
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
@ -239,6 +243,18 @@ spec:
operator: Exists
- key: {{ .MasterTaintKey }}
effect: NoSchedule
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:{{ .Version }}
@ -260,9 +276,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -293,12 +306,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes {{ .DNSDomain }} {{ .ServiceCIDR }} {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
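
Along with the move to `apps/v1`, the CoreDNS Deployment now runs two replicas with a RollingUpdate strategy and preferred pod anti-affinity, so the replicas are spread across nodes when possible. A rough sketch of how that affinity block maps onto the newly vendored `k8s.io/api` types (illustrative only, not code from this commit):

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Same shape as the manifest's affinity block: prefer (weight 100) not to
	// schedule next to another pod labelled k8s-app In [coredns] on the same host.
	affinity := &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
				Weight: 100,
				PodAffinityTerm: corev1.PodAffinityTerm{
					LabelSelector: &metav1.LabelSelector{
						MatchExpressions: []metav1.LabelSelectorRequirement{{
							Key:      "k8s-app",
							Operator: metav1.LabelSelectorOpIn,
							Values:   []string{"coredns"},
						}},
					},
					TopologyKey: "kubernetes.io/hostname",
				},
			}},
		},
	}
	out, _ := json.MarshalIndent(affinity, "", "  ")
	fmt.Println(string(out))
}
```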

@ -22,48 +22,26 @@ import (
)
const (
kubeDNSv180AndAboveVersion = "1.14.5"
kubeDNSv190AndAboveVersion = "1.14.7"
kubeDNSProbeSRV = "SRV"
kubeDNSProbeA = "A"
coreDNSVersion = "1.0.1"
kubeDNSv190AndAboveVersion = "1.14.8"
coreDNSVersion = "1.0.4"
)
// GetDNSVersion returns the right kube-dns version for a specific k8s version
func GetDNSVersion(kubeVersion *version.Version, dns string) string {
// v1.8.0+ uses kube-dns 1.14.5
// v1.9.0+ uses kube-dns 1.14.7
// v1.9.0+ uses CoreDNS 1.0.1
// v1.9.0+ uses kube-dns 1.14.8
// v1.9.0+ uses CoreDNS 1.0.4 if feature gate "CoreDNS" is enabled.
// In the future when the version is bumped at HEAD; add conditional logic to return the right versions
// Also, the version might be bumped for different k8s releases on the same branch
switch dns {
case kubeadmconstants.KubeDNS:
// return the kube-dns version
if kubeVersion.Major() == 1 && kubeVersion.Minor() >= 9 {
return kubeDNSv190AndAboveVersion
}
return kubeDNSv180AndAboveVersion
case kubeadmconstants.CoreDNS:
// return the CoreDNS version
return coreDNSVersion
default:
return kubeDNSv180AndAboveVersion
return kubeDNSv190AndAboveVersion
}
}
// GetKubeDNSProbeType returns the right kube-dns probe for a specific k8s version
func GetKubeDNSProbeType(kubeVersion *version.Version) string {
// v1.8.0+ uses type A, just return that here
// In the future when the kube-dns version is bumped at HEAD; add conditional logic to return the right versions
// Also, the version might be bumped for different k8s releases on the same branch
if kubeVersion.Major() == 1 && kubeVersion.Minor() >= 9 {
return kubeDNSProbeSRV
}
return kubeDNSProbeA
}
// GetKubeDNSManifest returns the right kube-dns YAML manifest for a specific k8s version
func GetKubeDNSManifest(kubeVersion *version.Version) string {
// v1.8.0+ has only one known YAML manifest spec, just return that here

@ -25,45 +25,38 @@ import (
func TestGetKubeDNSVersion(t *testing.T) {
var tests = []struct {
k8sVersion, expected string
k8sVersion string
dns string
expected string
}{
{
k8sVersion: "v1.7.0",
expected: "1.14.5",
k8sVersion: "v1.9.0",
dns: kubeadmconstants.KubeDNS,
expected: kubeDNSv190AndAboveVersion,
},
{
k8sVersion: "v1.7.1",
expected: "1.14.5",
},
{
k8sVersion: "v1.7.2",
expected: "1.14.5",
},
{
k8sVersion: "v1.7.3",
expected: "1.14.5",
},
{
k8sVersion: "v1.8.0-alpha.2",
expected: "1.14.5",
},
{
k8sVersion: "v1.8.0",
expected: "1.14.5",
k8sVersion: "v1.10.0",
dns: kubeadmconstants.KubeDNS,
expected: kubeDNSv190AndAboveVersion,
},
{
k8sVersion: "v1.9.0",
expected: "1.14.7",
dns: kubeadmconstants.CoreDNS,
expected: coreDNSVersion,
},
{
k8sVersion: "v1.10.0",
dns: kubeadmconstants.CoreDNS,
expected: coreDNSVersion,
},
}
for _, rt := range tests {
k8sVersion, err := version.ParseSemantic(rt.k8sVersion)
if err != nil {
t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err)
}
actualDNSVersion := GetDNSVersion(k8sVersion, kubeadmconstants.KubeDNS)
actualDNSVersion := GetDNSVersion(k8sVersion, rt.dns)
if actualDNSVersion != rt.expected {
t.Errorf(
"failed GetDNSVersion:\n\texpected: %s\n\t actual: %s",
@ -73,54 +66,3 @@ func TestGetKubeDNSVersion(t *testing.T) {
}
}
}
func TestGetKubeDNSProbeType(t *testing.T) {
var tests = []struct {
k8sVersion, expected string
}{
{
k8sVersion: "v1.7.0",
expected: "A",
},
{
k8sVersion: "v1.7.1",
expected: "A",
},
{
k8sVersion: "v1.7.2",
expected: "A",
},
{
k8sVersion: "v1.7.3",
expected: "A",
},
{
k8sVersion: "v1.8.0-alpha.2",
expected: "A",
},
{
k8sVersion: "v1.8.0",
expected: "A",
},
{
k8sVersion: "v1.9.0",
expected: "SRV",
},
}
for _, rt := range tests {
k8sVersion, err := version.ParseSemantic(rt.k8sVersion)
if err != nil {
t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err)
}
actualDNSProbeType := GetKubeDNSProbeType(k8sVersion)
if actualDNSProbeType != rt.expected {
t.Errorf(
"failed GetKubeDNSProbeType:\n\texpected: %s\n\t actual: %s",
rt.expected,
actualDNSProbeType,
)
}
}
}

@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["proxy_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
@ -41,9 +40,8 @@ go_library(
"//pkg/api/legacyscheme:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@ -17,37 +17,6 @@ limitations under the License.
package proxy
const (
// KubeProxyConfigMap18 is the proxy ConfigMap manifest for Kubernetes version 1.8
KubeProxyConfigMap18 = `
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-proxy
namespace: kube-system
labels:
app: kube-proxy
data:
kubeconfig.conf: |
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: {{ .MasterEndpoint }}
name: default
contexts:
- context:
cluster: default
namespace: default
user: default
name: default
current-context: default
users:
- name: default
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`
// KubeProxyConfigMap19 is the proxy ConfigMap manifest for Kubernetes 1.9 and above
KubeProxyConfigMap19 = `
kind: ConfigMap
@ -79,70 +48,11 @@ data:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
config.conf: |-
{{ .ProxyConfig}}
`
// KubeProxyDaemonSet18 is the proxy DaemonSet manifest for Kubernetes version 1.8
KubeProxyDaemonSet18 = `
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
labels:
k8s-app: kube-proxy
name: kube-proxy
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-proxy
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-proxy
spec:
containers:
- name: kube-proxy
image: {{ if .ImageOverride }}{{ .ImageOverride }}{{ else }}{{ .ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}{{ end }}
imagePullPolicy: IfNotPresent
command:
- /usr/local/bin/kube-proxy
- --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
{{ .ClusterCIDR }}
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kube-proxy
name: kube-proxy
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
hostNetwork: true
serviceAccountName: kube-proxy
tolerations:
- key: {{ .MasterTaintKey }}
effect: NoSchedule
- key: {{ .CloudTaintKey }}
value: "true"
effect: NoSchedule
volumes:
- name: kube-proxy
configMap:
name: kube-proxy
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: lib-modules
hostPath:
path: /lib/modules
`
// KubeProxyDaemonSet19 is the proxy DaemonSet manifest for Kubernetes 1.9 and above
KubeProxyDaemonSet19 = `
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:

@ -17,10 +17,11 @@ limitations under the License.
package proxy
import (
"bytes"
"fmt"
"runtime"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -33,8 +34,7 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
kubeproxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme"
kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
)
const (
@ -58,63 +58,35 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte
return err
}
proxyBytes, err := kubeadmutil.MarshalToYamlForCodecsWithShift(cfg.KubeProxy.Config, kubeproxyconfigv1alpha1.SchemeGroupVersion,
proxyBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeProxy.Config, kubeproxyconfigv1alpha1.SchemeGroupVersion,
kubeproxyconfigscheme.Codecs)
if err != nil {
return fmt.Errorf("error when marshaling: %v", err)
}
// Parse the given kubernetes version
k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
if err != nil {
return fmt.Errorf("couldn't parse kubernetes version %q: %v", cfg.KubernetesVersion, err)
}
var prefixBytes bytes.Buffer
apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ")
var proxyConfigMapBytes, proxyDaemonSetBytes []byte
if k8sVersion.AtLeast(kubeadmconstants.MinimumKubeProxyComponentConfigVersion) {
proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
struct {
MasterEndpoint string
ProxyConfig string
}{
MasterEndpoint: masterEndpoint,
ProxyConfig: proxyBytes,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{
ImageRepository: cfg.GetControlPlaneImageRepository(),
Arch: runtime.GOARCH,
Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion),
ImageOverride: cfg.UnifiedControlPlaneImage,
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
CloudTaintKey: algorithm.TaintExternalCloudProvider,
proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
struct {
MasterEndpoint string
ProxyConfig string
}{
MasterEndpoint: masterEndpoint,
ProxyConfig: prefixBytes.String(),
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
} else {
proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap18,
struct {
MasterEndpoint string
}{
MasterEndpoint: masterEndpoint,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet18, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{
ImageRepository: cfg.GetControlPlaneImageRepository(),
Arch: runtime.GOARCH,
Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion),
ImageOverride: cfg.UnifiedControlPlaneImage,
ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet),
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
CloudTaintKey: algorithm.TaintExternalCloudProvider,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, MasterTaintKey, CloudTaintKey string }{
ImageRepository: cfg.GetControlPlaneImageRepository(),
Arch: runtime.GOARCH,
Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion),
ImageOverride: cfg.UnifiedControlPlaneImage,
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
CloudTaintKey: algorithm.TaintExternalCloudProvider,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
if err := createKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client); err != nil {
return err
@ -182,10 +154,3 @@ func createClusterRoleBindings(client clientset.Interface) error {
},
})
}
func getClusterCIDR(podsubnet string) string {
if len(podsubnet) == 0 {
return ""
}
return "- --cluster-cidr=" + podsubnet
}
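
The rewrite above marshals the kube-proxy component config with the plain MarshalToYamlForCodecs and then indents it with apiclient.PrintBytesWithLinePrefix before substituting it into the ConfigMap template, where it lands under `config.conf: |-`; the cluster CIDR is now expected to come from that config rather than from the removed `--cluster-cidr` flag helper. A stand-alone sketch of the prefixing step, with assumed example values and only the standard library:

```go
package main

import (
	"fmt"
	"strings"
)

// prefixLines plays the role PrintBytesWithLinePrefix plays above: indent every
// line of the marshaled KubeProxyConfiguration so it nests correctly under the
// literal block scalar `config.conf: |-` in the ConfigMap template.
func prefixLines(in, prefix string) string {
	var b strings.Builder
	for _, line := range strings.Split(strings.TrimRight(in, "\n"), "\n") {
		b.WriteString(prefix + line + "\n")
	}
	return b.String()
}

func main() {
	marshaled := "apiVersion: kubeproxy.config.k8s.io/v1alpha1\n" +
		"kind: KubeProxyConfiguration\n" +
		"clusterCIDR: 10.244.0.0/16\n" // example value; carried inside the config now
	fmt.Print("  config.conf: |-\n" + prefixLines(marshaled, "    "))
}
```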

@ -90,38 +90,12 @@ func TestCreateServiceAccount(t *testing.T) {
}
}
func TestGetClusterCIDR(t *testing.T) {
emptyClusterCIDR := getClusterCIDR("")
if emptyClusterCIDR != "" {
t.Errorf("Invalid format: %s", emptyClusterCIDR)
}
clusterCIDR := getClusterCIDR("10.244.0.0/16")
if clusterCIDR != "- --cluster-cidr=10.244.0.0/16" {
t.Errorf("Invalid format: %s", clusterCIDR)
}
clusterIPv6CIDR := getClusterCIDR("2001:db8::/64")
if clusterIPv6CIDR != "- --cluster-cidr=2001:db8::/64" {
t.Errorf("Invalid format: %s", clusterIPv6CIDR)
}
}
func TestCompileManifests(t *testing.T) {
var tests = []struct {
manifest string
data interface{}
expected bool
}{
{
manifest: KubeProxyConfigMap18,
data: struct {
MasterEndpoint, ProxyConfig string
}{
MasterEndpoint: "foo",
},
expected: true,
},
{
manifest: KubeProxyConfigMap19,
data: struct {
@ -132,19 +106,6 @@ func TestCompileManifests(t *testing.T) {
},
expected: true,
},
{
manifest: KubeProxyDaemonSet18,
data: struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{
ImageRepository: "foo",
Arch: "foo",
Version: "foo",
ImageOverride: "foo",
ClusterCIDR: "foo",
MasterTaintKey: "foo",
CloudTaintKey: "foo",
},
expected: true,
},
{
manifest: KubeProxyDaemonSet19,
data: struct{ ImageRepository, Arch, Version, ImageOverride, MasterTaintKey, CloudTaintKey string }{
@ -240,7 +201,7 @@ func TestEnsureProxyAddon(t *testing.T) {
UnifiedControlPlaneImage: "someImage",
}
// Simulate an error if neccessary
// Simulate an error if necessary
switch tc.simError {
case ServiceAccountError:
client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {

@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["clusterinfo_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@ -27,12 +26,12 @@ go_library(
deps = [
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/bootstrap/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
],

@ -24,11 +24,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
)
const (

@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["token_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node",
library = ":go_default_library",
embed = [":go_default_library"],
deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"],
)
@ -25,12 +24,13 @@ go_library(
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/token:go_default_library",
"//pkg/bootstrap/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/util:go_default_library",
],
)

@ -25,8 +25,9 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
bootstraputil "k8s.io/client-go/tools/bootstrap/token/util"
tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
)
const tokenCreateRetries = 5
@ -115,7 +116,7 @@ func encodeTokenSecretData(tokenID, tokenSecret string, duration time.Duration,
}
// validate usages
if err := bootstrapapi.ValidateUsages(usages); err != nil {
if err := bootstraputil.ValidateUsages(usages); err != nil {
return nil, err
}
for _, usage := range usages {

@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["certs_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
@ -31,8 +30,6 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)

@ -20,16 +20,13 @@ import (
"crypto/rsa"
"crypto/x509"
"fmt"
"net"
"os"
"path/filepath"
"k8s.io/apimachinery/pkg/util/validation"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane.
@ -40,6 +37,9 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error {
CreateCACertAndKeyfiles,
CreateAPIServerCertAndKeyFiles,
CreateAPIServerKubeletClientCertAndKeyFiles,
CreateEtcdServerCertAndKeyFiles,
CreateEtcdPeerCertAndKeyFiles,
CreateAPIServerEtcdClientCertAndKeyFiles,
CreateServiceAccountKeyAndPublicKeyFiles,
CreateFrontProxyCACertAndKeyFiles,
CreateFrontProxyClientCertAndKeyFiles,
@ -79,7 +79,7 @@ func CreateCACertAndKeyfiles(cfg *kubeadmapi.MasterConfiguration) error {
// It assumes the cluster CA certificate and key files should exists into the CertificatesDir
func CreateAPIServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthorithy(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
@ -103,12 +103,12 @@ func CreateAPIServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
// It assumes the cluster CA certificate and key files should exists into the CertificatesDir
func CreateAPIServerKubeletClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthorithy(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
apiClientCert, apiClientKey, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey)
apiKubeletClientCert, apiKubeletClientKey, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey)
if err != nil {
return err
}
@ -117,8 +117,80 @@ func CreateAPIServerKubeletClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfigura
cfg.CertificatesDir,
kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName,
caCert,
apiClientCert,
apiClientKey,
apiKubeletClientCert,
apiKubeletClientKey,
)
}
// CreateEtcdServerCertAndKeyFiles create a new certificate and key file for etcd.
// If the etcd serving certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key file exist in the CertificatesDir
func CreateEtcdServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
etcdServerCert, etcdServerKey, err := NewEtcdServerCertAndKey(cfg, caCert, caKey)
if err != nil {
return err
}
return writeCertificateFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.EtcdServerCertAndKeyBaseName,
caCert,
etcdServerCert,
etcdServerKey,
)
}
// CreateEtcdPeerCertAndKeyFiles create a new certificate and key file for etcd peering.
// If the etcd peer certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key file exist in the CertificatesDir
func CreateEtcdPeerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
etcdPeerCert, etcdPeerKey, err := NewEtcdPeerCertAndKey(cfg, caCert, caKey)
if err != nil {
return err
}
return writeCertificateFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.EtcdPeerCertAndKeyBaseName,
caCert,
etcdPeerCert,
etcdPeerKey,
)
}
// CreateAPIServerEtcdClientCertAndKeyFiles create a new client certificate for the apiserver calling etcd
// If the apiserver-etcd-client certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key file exist in the CertificatesDir
func CreateAPIServerEtcdClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
return err
}
apiEtcdClientCert, apiEtcdClientKey, err := NewAPIServerEtcdClientCertAndKey(caCert, caKey)
if err != nil {
return err
}
return writeCertificateFilesIfNotExist(
cfg.CertificatesDir,
kubeadmconstants.APIServerEtcdClientCertAndKeyBaseName,
caCert,
apiEtcdClientCert,
apiEtcdClientKey,
)
}
@ -140,7 +212,7 @@ func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.MasterConfiguratio
// CreateFrontProxyCACertAndKeyFiles create a self signed front proxy CA certificate and key files.
// Front proxy CA and client certs are used to secure a front proxy authenticator which is used to assert identity
// without the client cert; This is a separte CA, so that front proxy identities cannot hit the API and normal client certs cannot be used
// without the client cert; This is a separate CA, so that front proxy identities cannot hit the API and normal client certs cannot be used
// as front proxies.
// If the front proxy CA certificate and key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
func CreateFrontProxyCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
@ -163,7 +235,7 @@ func CreateFrontProxyCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) erro
// It assumes the front proxy CAA certificate and key files should exists into the CertificatesDir
func CreateFrontProxyClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
frontProxyCACert, frontProxyCAKey, err := loadCertificateAuthorithy(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName)
frontProxyCACert, frontProxyCAKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName)
if err != nil {
return err
}
@ -196,7 +268,7 @@ func NewCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
// NewAPIServerCertAndKey generate CA certificate for apiserver, signed by the given CA.
func NewAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
altNames, err := getAltNames(cfg)
altNames, err := pkiutil.GetAPIServerAltNames(cfg)
if err != nil {
return nil, nil, fmt.Errorf("failure while composing altnames for API server: %v", err)
}
@ -230,6 +302,64 @@ func NewAPIServerKubeletClientCertAndKey(caCert *x509.Certificate, caKey *rsa.Pr
return apiClientCert, apiClientKey, nil
}
// NewEtcdServerCertAndKey generate CA certificate for etcd, signed by the given CA.
func NewEtcdServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
altNames, err := pkiutil.GetEtcdAltNames(cfg)
if err != nil {
return nil, nil, fmt.Errorf("failure while composing altnames for etcd: %v", err)
}
config := certutil.Config{
CommonName: kubeadmconstants.EtcdServerCertCommonName,
AltNames: *altNames,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
etcdServerCert, etcdServerKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
if err != nil {
return nil, nil, fmt.Errorf("failure while creating etcd key and certificate: %v", err)
}
return etcdServerCert, etcdServerKey, nil
}
// NewEtcdPeerCertAndKey generate CA certificate for etcd peering, signed by the given CA.
func NewEtcdPeerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
altNames, err := pkiutil.GetEtcdPeerAltNames(cfg)
if err != nil {
return nil, nil, fmt.Errorf("failure while composing altnames for etcd peering: %v", err)
}
config := certutil.Config{
CommonName: kubeadmconstants.EtcdPeerCertCommonName,
AltNames: *altNames,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
}
etcdPeerCert, etcdPeerKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
if err != nil {
return nil, nil, fmt.Errorf("failure while creating etcd peer key and certificate: %v", err)
}
return etcdPeerCert, etcdPeerKey, nil
}
// NewAPIServerEtcdClientCertAndKey generate CA certificate for the apiservers to connect to etcd securely, signed by the given CA.
func NewAPIServerEtcdClientCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
config := certutil.Config{
CommonName: kubeadmconstants.APIServerEtcdClientCertCommonName,
Organization: []string{kubeadmconstants.MastersGroup},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
if err != nil {
return nil, nil, fmt.Errorf("failure while creating API server etcd client key and certificate: %v", err)
}
return apiClientCert, apiClientKey, nil
}
// NewServiceAccountSigningKey generate public/private key pairs for signing service account tokens.
func NewServiceAccountSigningKey() (*rsa.PrivateKey, error) {
@ -268,28 +398,28 @@ func NewFrontProxyClientCertAndKey(frontProxyCACert *x509.Certificate, frontProx
return frontProxyClientCert, frontProxyClientKey, nil
}
// loadCertificateAuthorithy loads certificate authorithy
func loadCertificateAuthorithy(pkiDir string, baseName string) (*x509.Certificate, *rsa.PrivateKey, error) {
// Checks if certificate authorithy exists in the PKI directory
// loadCertificateAuthority loads certificate authority
func loadCertificateAuthority(pkiDir string, baseName string) (*x509.Certificate, *rsa.PrivateKey, error) {
// Checks if certificate authority exists in the PKI directory
if !pkiutil.CertOrKeyExist(pkiDir, baseName) {
return nil, nil, fmt.Errorf("couldn't load %s certificate authorithy from %s", baseName, pkiDir)
return nil, nil, fmt.Errorf("couldn't load %s certificate authority from %s", baseName, pkiDir)
}
// Try to load certificate authorithy .crt and .key from the PKI directory
// Try to load certificate authority .crt and .key from the PKI directory
caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName)
if err != nil {
return nil, nil, fmt.Errorf("failure loading %s certificate authorithy: %v", baseName, err)
return nil, nil, fmt.Errorf("failure loading %s certificate authority: %v", baseName, err)
}
// Make sure the loaded CA cert actually is a CA
if !caCert.IsCA {
return nil, nil, fmt.Errorf("%s certificate is not a certificate authorithy", baseName)
return nil, nil, fmt.Errorf("%s certificate is not a certificate authority", baseName)
}
return caCert, caKey, nil
}
// writeCertificateAuthorithyFilesIfNotExist write a new certificate Authorithy to the given path.
// writeCertificateAuthorithyFilesIfNotExist write a new certificate Authority to the given path.
// If there already is a certificate file at the given path; kubeadm tries to load it and check if the values in the
// existing and the expected certificate equals. If they do; kubeadm will just skip writing the file as it's up-to-date,
// otherwise this function returns an error.
@ -309,7 +439,7 @@ func writeCertificateAuthorithyFilesIfNotExist(pkiDir string, baseName string, c
return fmt.Errorf("certificate %s is not a CA", baseName)
}
// kubeadm doesn't validate the existing certificate Authorithy more than this;
// kubeadm doesn't validate the existing certificate Authority more than this;
// Basically, if we find a certificate file with the same path; and it is a CA
// kubeadm thinks those files are equal and doesn't bother writing a new file
fmt.Printf("[certificates] Using the existing %s certificate and key.\n", baseName)
@ -346,7 +476,7 @@ func writeCertificateFilesIfNotExist(pkiDir string, baseName string, signingCert
// kubeadm doesn't validate the existing certificate more than this;
// Basically, if we find a certificate file with the same path; and it is signed by
// the expected certificate authorithy, kubeadm thinks those files are equal and
// the expected certificate authority, kubeadm thinks those files are equal and
// doesn't bother writing a new file
fmt.Printf("[certificates] Using the existing %s certificate and key.\n", baseName)
} else {
@ -478,7 +608,7 @@ func validateSignedCert(l certKeyLocation) error {
// Try to load CA
caCert, err := pkiutil.TryLoadCertFromDisk(l.pkiDir, l.caBaseName)
if err != nil {
return fmt.Errorf("failure loading certificate authorithy for %s: %v", l.uxName, err)
return fmt.Errorf("failure loading certificate authority for %s: %v", l.uxName, err)
}
// Try to load key and signed certificate
@ -503,50 +633,3 @@ func validatePrivatePublicKey(l certKeyLocation) error {
}
return nil
}
// getAltNames builds an AltNames object for to be used when generating apiserver certificate
func getAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) {
// advertise address
advertiseAddress := net.ParseIP(cfg.API.AdvertiseAddress)
if advertiseAddress == nil {
return nil, fmt.Errorf("error parsing API AdvertiseAddress %v: is not a valid textual representation of an IP address", cfg.API.AdvertiseAddress)
}
// internal IP address for the API server
_, svcSubnet, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
if err != nil {
return nil, fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err)
}
internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(svcSubnet, 1)
if err != nil {
return nil, fmt.Errorf("unable to get first IP address from the given CIDR (%s): %v", svcSubnet.String(), err)
}
// create AltNames with defaults DNSNames/IPs
altNames := &certutil.AltNames{
DNSNames: []string{
cfg.NodeName,
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
fmt.Sprintf("kubernetes.default.svc.%s", cfg.Networking.DNSDomain),
},
IPs: []net.IP{
internalAPIServerVirtualIP,
advertiseAddress,
},
}
// adds additional SAN
for _, altname := range cfg.APIServerCertSANs {
if ip := net.ParseIP(altname); ip != nil {
altNames.IPs = append(altNames.IPs, ip)
} else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
altNames.DNSNames = append(altNames.DNSNames, altname)
}
}
return altNames, nil
}
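
The new etcd phases above all follow the same shape: load the cluster CA with loadCertificateAuthority, build a certutil.Config (common name, AltNames, ext key usages), and hand it to pkiutil.NewCertAndKey. A rough standard-library approximation of what the etcd serving certificate ends up containing, using the default localhost/127.0.0.1 SANs from GetEtcdAltNames (the CN and names here are illustrative assumptions, not the kubeadm code path):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// Throw-away CA standing in for the cluster CA; error handling elided for brevity.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "kubernetes"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Etcd serving cert: ServerAuth usage plus the default SANs GetEtcdAltNames provides.
	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "kube-etcd"}, // assumed CN for illustration
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		DNSNames:     []string{"localhost"},
		IPAddresses:  []net.IP{net.IPv4(127, 0, 0, 1)},
	}
	srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
	srvCert, _ := x509.ParseCertificate(srvDER)
	fmt.Println("issuer:", srvCert.Issuer.CommonName, "SANs:", srvCert.DNSNames, srvCert.IPAddresses)
}
```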

@ -258,51 +258,6 @@ func TestWriteKeyFilesIfNotExist(t *testing.T) {
}
}
func TestGetAltNames(t *testing.T) {
hostname := "valid-hostname"
advertiseIP := "1.2.3.4"
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: advertiseIP},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: hostname,
}
altNames, err := getAltNames(cfg)
if err != nil {
t.Fatalf("failed calling getAltNames: %v", err)
}
expectedDNSNames := []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local"}
for _, DNSName := range expectedDNSNames {
found := false
for _, val := range altNames.DNSNames {
if val == DNSName {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain DNSName %s", DNSName)
}
}
expectedIPAddresses := []string{"10.96.0.1", advertiseIP}
for _, IPAddress := range expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain IPAddress %s", IPAddress)
}
}
}
func TestNewCACertAndKey(t *testing.T) {
caCert, _, err := NewCACertAndKey()
if err != nil {
@ -320,7 +275,7 @@ func TestNewAPIServerCertAndKey(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: addr},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "valid-hostname",
NodeName: hostname,
}
caCert, caKey, err := NewCACertAndKey()
if err != nil {
@ -345,14 +300,93 @@ func TestNewAPIServerKubeletClientCertAndKey(t *testing.T) {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
apiClientCert, _, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey)
apiKubeletClientCert, _, err := NewAPIServerKubeletClientCertAndKey(caCert, caKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, apiClientCert, caCert)
certstestutil.AssertCertificateHasClientAuthUsage(t, apiClientCert)
certstestutil.AssertCertificateHasOrganizations(t, apiClientCert, kubeadmconstants.MastersGroup)
certstestutil.AssertCertificateIsSignedByCa(t, apiKubeletClientCert, caCert)
certstestutil.AssertCertificateHasClientAuthUsage(t, apiKubeletClientCert)
certstestutil.AssertCertificateHasOrganizations(t, apiKubeletClientCert, kubeadmconstants.MastersGroup)
}
func TestNewEtcdServerCertAndKey(t *testing.T) {
proxy := "user-etcd-proxy"
proxyIP := "10.10.10.100"
cfg := &kubeadmapi.MasterConfiguration{
Etcd: kubeadmapi.Etcd{
ServerCertSANs: []string{
proxy,
proxyIP,
},
},
}
caCert, caKey, err := NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
etcdServerCert, _, err := NewEtcdServerCertAndKey(cfg, caCert, caKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, etcdServerCert, caCert)
certstestutil.AssertCertificateHasServerAuthUsage(t, etcdServerCert)
certstestutil.AssertCertificateHasDNSNames(t, etcdServerCert, "localhost", proxy)
certstestutil.AssertCertificateHasIPAddresses(t, etcdServerCert, net.ParseIP("127.0.0.1"), net.ParseIP(proxyIP))
}
func TestNewEtcdPeerCertAndKey(t *testing.T) {
hostname := "valid-hostname"
proxy := "user-etcd-proxy"
proxyIP := "10.10.10.100"
advertiseAddresses := []string{"1.2.3.4", "1:2:3::4"}
for _, addr := range advertiseAddresses {
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: addr},
NodeName: hostname,
Etcd: kubeadmapi.Etcd{
PeerCertSANs: []string{
proxy,
proxyIP,
},
},
}
caCert, caKey, err := NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
etcdPeerCert, _, err := NewEtcdPeerCertAndKey(cfg, caCert, caKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, etcdPeerCert, caCert)
certstestutil.AssertCertificateHasServerAuthUsage(t, etcdPeerCert)
certstestutil.AssertCertificateHasClientAuthUsage(t, etcdPeerCert)
certstestutil.AssertCertificateHasDNSNames(t, etcdPeerCert, hostname, proxy)
certstestutil.AssertCertificateHasIPAddresses(t, etcdPeerCert, net.ParseIP(addr), net.ParseIP(proxyIP))
}
}
func TestNewAPIServerEtcdClientCertAndKey(t *testing.T) {
caCert, caKey, err := NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
apiEtcdClientCert, _, err := NewAPIServerEtcdClientCertAndKey(caCert, caKey)
if err != nil {
t.Fatalf("failed creation of cert and key: %v", err)
}
certstestutil.AssertCertificateIsSignedByCa(t, apiEtcdClientCert, caCert)
certstestutil.AssertCertificateHasClientAuthUsage(t, apiEtcdClientCert)
certstestutil.AssertCertificateHasOrganizations(t, apiEtcdClientCert, kubeadmconstants.MastersGroup)
}
func TestNewNewServiceAccountSigningKey(t *testing.T) {
@ -549,6 +583,9 @@ func TestCreateCertificateFilesMethods(t *testing.T) {
kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
kubeadmconstants.EtcdServerCertName, kubeadmconstants.EtcdServerKeyName,
kubeadmconstants.EtcdPeerCertName, kubeadmconstants.EtcdPeerKeyName,
kubeadmconstants.APIServerEtcdClientCertName, kubeadmconstants.APIServerEtcdClientKeyName,
kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName,
kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName,
kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName,
@ -568,6 +605,21 @@ func TestCreateCertificateFilesMethods(t *testing.T) {
createFunc: CreateAPIServerKubeletClientCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName},
},
{
setupFunc: CreateCACertAndKeyfiles,
createFunc: CreateEtcdServerCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.EtcdServerCertName, kubeadmconstants.EtcdServerKeyName},
},
{
setupFunc: CreateCACertAndKeyfiles,
createFunc: CreateEtcdPeerCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.EtcdPeerCertName, kubeadmconstants.EtcdPeerKeyName},
},
{
setupFunc: CreateCACertAndKeyfiles,
createFunc: CreateAPIServerEtcdClientCertAndKeyFiles,
expectedFiles: []string{kubeadmconstants.APIServerEtcdClientCertName, kubeadmconstants.APIServerEtcdClientKeyName},
},
{
createFunc: CreateServiceAccountKeyAndPublicKeyFiles,
expectedFiles: []string{kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName},

@ -23,7 +23,9 @@ package certs
INPUTS:
From MasterConfiguration
.API.AdvertiseAddress is an optional parameter that can be passed for an extra addition to the SAN IPs
.APIServerCertSANs is needed for knowing which DNS names and IPs the API Server serving cert should be valid for
.APIServerCertSANs is an optional parameter for adding DNS names and IPs to the API Server serving cert SAN
.Etcd.ServerCertSANs is an optional parameter for adding DNS names and IPs to the etcd serving cert SAN
.Etcd.PeerCertSANs is an optional parameter for adding DNS names and IPs to the etcd peer cert SAN
.Networking.DNSDomain is needed for knowing which DNS name the internal kubernetes service has
.Networking.ServiceSubnet is needed for knowing which IP the internal kubernetes service is going to point to
.CertificatesDir is required for knowing where all certificates should be stored
@ -36,6 +38,12 @@ package certs
- apiserver.key
- apiserver-kubelet-client.crt
- apiserver-kubelet-client.key
- apiserver-etcd-client.crt
- apiserver-etcd-client.key
- etcd/server.crt
- etcd/server.key
- etcd/peer.crt
- etcd/peer.key
- sa.pub
- sa.key
- front-proxy-ca.crt

@ -9,16 +9,24 @@ load(
go_test(
name = "go_default_test",
srcs = ["pki_helpers_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil",
library = ":go_default_library",
deps = ["//vendor/k8s.io/client-go/util/cert:go_default_library"],
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["pki_helpers.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil",
deps = ["//vendor/k8s.io/client-go/util/cert:go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)
filegroup(

@ -20,11 +20,16 @@ import (
"crypto/rsa"
"crypto/x509"
"fmt"
"net"
"os"
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/util/validation"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
// NewCertificateAuthority creates new certificate and private key for the certificate authority
@ -125,7 +130,7 @@ func WritePublicKey(pkiPath, name string, key *rsa.PublicKey) error {
return nil
}
// CertOrKeyExist retuns a boolean whether the cert or the key exists
// CertOrKeyExist returns a boolean whether the cert or the key exists
func CertOrKeyExist(pkiPath, name string) bool {
certificatePath, privateKeyPath := pathsForCertAndKey(pkiPath, name)
@ -246,3 +251,106 @@ func pathForKey(pkiPath, name string) string {
func pathForPublicKey(pkiPath, name string) string {
return filepath.Join(pkiPath, fmt.Sprintf("%s.pub", name))
}
// GetAPIServerAltNames builds an AltNames object for to be used when generating apiserver certificate
func GetAPIServerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) {
// advertise address
advertiseAddress := net.ParseIP(cfg.API.AdvertiseAddress)
if advertiseAddress == nil {
return nil, fmt.Errorf("error parsing API AdvertiseAddress %v: is not a valid textual representation of an IP address", cfg.API.AdvertiseAddress)
}
// internal IP address for the API server
_, svcSubnet, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
if err != nil {
return nil, fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err)
}
internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(svcSubnet, 1)
if err != nil {
return nil, fmt.Errorf("unable to get first IP address from the given CIDR (%s): %v", svcSubnet.String(), err)
}
// create AltNames with defaults DNSNames/IPs
altNames := &certutil.AltNames{
DNSNames: []string{
cfg.NodeName,
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
fmt.Sprintf("kubernetes.default.svc.%s", cfg.Networking.DNSDomain),
},
IPs: []net.IP{
internalAPIServerVirtualIP,
advertiseAddress,
},
}
// add api server dns advertise address
if len(cfg.API.ControlPlaneEndpoint) > 0 {
altNames.DNSNames = append(altNames.DNSNames, cfg.API.ControlPlaneEndpoint)
}
appendSANsToAltNames(altNames, cfg.APIServerCertSANs, kubeadmconstants.APIServerCertName)
return altNames, nil
}
// GetEtcdAltNames builds an AltNames object for generating the etcd server certificate.
// `localhost` is included in the SAN since this is the interface the etcd static pod listens on.
// Hostname and `API.AdvertiseAddress` are excluded since etcd does not listen on this interface by default.
// The user can override the listen address with `Etcd.ExtraArgs` and add SANs with `Etcd.ServerCertSANs`.
func GetEtcdAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) {
// create AltNames with defaults DNSNames/IPs
altNames := &certutil.AltNames{
DNSNames: []string{"localhost"},
IPs: []net.IP{net.IPv4(127, 0, 0, 1)},
}
appendSANsToAltNames(altNames, cfg.Etcd.ServerCertSANs, kubeadmconstants.EtcdServerCertName)
return altNames, nil
}
// GetEtcdPeerAltNames builds an AltNames object for generating the etcd peer certificate.
// `localhost` is excluded from the SAN since etcd will not refer to itself as a peer.
// Hostname and `API.AdvertiseAddress` are included if the user chooses to promote the single node etcd cluster into a multi-node one.
// The user can override the listen address with `Etcd.ExtraArgs` and add SANs with `Etcd.PeerCertSANs`.
func GetEtcdPeerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) {
// advertise address
advertiseAddress := net.ParseIP(cfg.API.AdvertiseAddress)
if advertiseAddress == nil {
return nil, fmt.Errorf("error parsing API AdvertiseAddress %v: is not a valid textual representation of an IP address", cfg.API.AdvertiseAddress)
}
// create AltNames with defaults DNSNames/IPs
altNames := &certutil.AltNames{
DNSNames: []string{cfg.NodeName},
IPs: []net.IP{advertiseAddress},
}
appendSANsToAltNames(altNames, cfg.Etcd.PeerCertSANs, kubeadmconstants.EtcdPeerCertName)
return altNames, nil
}
// appendSANsToAltNames parses SANs from as list of strings and adds them to altNames for use on a specific cert
// altNames is passed in with a pointer, and the struct is modified
// valid IP address strings are parsed and added to altNames.IPs as net.IP's
// RFC-1123 compliant DNS strings are added to altNames.DNSNames as strings
// certNames is used to print user facing warnings and should be the name of the cert the altNames will be used for
func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string, certName string) {
for _, altname := range SANs {
if ip := net.ParseIP(altname); ip != nil {
altNames.IPs = append(altNames.IPs, ip)
} else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
altNames.DNSNames = append(altNames.DNSNames, altname)
} else {
fmt.Printf(
"[certificates] WARNING: '%s' was not added to the '%s' SAN, because it is not a valid IP or RFC-1123 compliant DNS entry\n",
altname,
certName,
)
}
}
}
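
appendSANsToAltNames is where each user-supplied SAN gets classified: parseable IPs are appended to AltNames.IPs, RFC-1123 subdomains to AltNames.DNSNames, and anything else is dropped with a warning. A small stand-in sketch of that per-entry decision, using the same stdlib and apimachinery calls (not the function above):

```go
package main

import (
	"fmt"
	"net"

	"k8s.io/apimachinery/pkg/util/validation"
)

// classify mirrors the decision appendSANsToAltNames makes for one SAN entry.
func classify(san string) string {
	if ip := net.ParseIP(san); ip != nil {
		return "IP SAN"
	}
	if len(validation.IsDNS1123Subdomain(san)) == 0 {
		return "DNS SAN"
	}
	return "rejected (warning printed)"
}

func main() {
	// Same kinds of inputs the tests below feed in.
	for _, san := range []string{"10.1.245.94", "user-etcd-proxy", "1.2.3.L", "invalid,commas,in,DNS"} {
		fmt.Printf("%-22s -> %s\n", san, classify(san))
	}
}
```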

@ -21,10 +21,12 @@ import (
"crypto/rsa"
"crypto/x509"
"io/ioutil"
"net"
"os"
"testing"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
func TestNewCertificateAuthority(t *testing.T) {
@ -432,3 +434,154 @@ func TestPathForPublicKey(t *testing.T) {
t.Errorf("unexpected certificate path: %s", pubPath)
}
}
func TestGetAPIServerAltNames(t *testing.T) {
hostname := "valid-hostname"
advertiseIP := "1.2.3.4"
controlPlaneEndpoint := "api.k8s.io"
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: advertiseIP, ControlPlaneEndpoint: controlPlaneEndpoint},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: hostname,
APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95", "1.2.3.L", "invalid,commas,in,DNS"},
}
altNames, err := GetAPIServerAltNames(cfg)
if err != nil {
t.Fatalf("failed calling GetAPIServerAltNames: %v", err)
}
expectedDNSNames := []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local", controlPlaneEndpoint}
for _, DNSName := range expectedDNSNames {
found := false
for _, val := range altNames.DNSNames {
if val == DNSName {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain DNSName %s", DNSName)
}
}
expectedIPAddresses := []string{"10.96.0.1", advertiseIP, "10.1.245.94", "10.1.245.95"}
for _, IPAddress := range expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain IPAddress %s", IPAddress)
}
}
}
func TestGetEtcdAltNames(t *testing.T) {
proxy := "user-etcd-proxy"
proxyIP := "10.10.10.100"
cfg := &kubeadmapi.MasterConfiguration{
Etcd: kubeadmapi.Etcd{
ServerCertSANs: []string{
proxy,
proxyIP,
"1.2.3.L",
"invalid,commas,in,DNS",
},
},
}
altNames, err := GetEtcdAltNames(cfg)
if err != nil {
t.Fatalf("failed calling GetEtcdAltNames: %v", err)
}
expectedDNSNames := []string{"localhost", proxy}
for _, DNSName := range expectedDNSNames {
found := false
for _, val := range altNames.DNSNames {
if val == DNSName {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain DNSName %s", DNSName)
}
}
expectedIPAddresses := []string{"127.0.0.1", proxyIP}
for _, IPAddress := range expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain IPAddress %s", IPAddress)
}
}
}
func TestGetEtcdPeerAltNames(t *testing.T) {
hostname := "valid-hostname"
proxy := "user-etcd-proxy"
proxyIP := "10.10.10.100"
advertiseIP := "1.2.3.4"
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: advertiseIP},
NodeName: hostname,
Etcd: kubeadmapi.Etcd{
PeerCertSANs: []string{
proxy,
proxyIP,
"1.2.3.L",
"invalid,commas,in,DNS",
},
},
}
altNames, err := GetEtcdPeerAltNames(cfg)
if err != nil {
t.Fatalf("failed calling GetEtcdPeerAltNames: %v", err)
}
expectedDNSNames := []string{hostname, proxy}
for _, DNSName := range expectedDNSNames {
found := false
for _, val := range altNames.DNSNames {
if val == DNSName {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain DNSName %s", DNSName)
}
}
expectedIPAddresses := []string{advertiseIP, proxyIP}
for _, IPAddress := range expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
found = true
break
}
}
if !found {
t.Errorf("altNames does not contain IPAddress %s", IPAddress)
}
}
}
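
The `10.96.0.1` these tests expect is produced by ipallocator.GetIndexedIP(svcSubnet, 1) in GetAPIServerAltNames: the first usable address of the service CIDR is the ClusterIP of the `kubernetes` Service, so it must appear in the API server cert's SANs. A stdlib-only sketch of that calculation for IPv4 (not the real allocator):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// firstServiceIP returns the address at offset 1 inside an IPv4 service CIDR,
// i.e. the ClusterIP the kubernetes.default Service gets.
func firstServiceIP(cidr string) (net.IP, error) {
	_, subnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	base := subnet.IP.To4()
	if base == nil {
		return nil, fmt.Errorf("IPv4-only sketch, got %q", cidr)
	}
	n := binary.BigEndian.Uint32(base) + 1 // offset 1 within the subnet
	out := make(net.IP, net.IPv4len)
	binary.BigEndian.PutUint32(out, n)
	return out, nil
}

func main() {
	ip, err := firstServiceIP("10.96.0.0/12")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 10.96.0.1
}
```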

@ -12,8 +12,7 @@ go_test(
"manifests_test.go",
"volumes_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
@ -21,6 +20,7 @@ go_test(
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/util/pointer:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
@ -44,6 +44,7 @@ go_library(
"//cmd/kubeadm/app/util/staticpod:go_default_library",
"//pkg/kubeapiserver/authorizer/modes:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/util/pointer:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",

@ -21,6 +21,7 @@ import (
"net"
"os"
"path/filepath"
"strconv"
"strings"
"k8s.io/api/core/v1"
@ -35,6 +36,7 @@ import (
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/master/reconcilers"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/pkg/util/version"
)
@ -42,9 +44,8 @@ import (
const (
DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"
defaultV18AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
deprecatedV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
defaultV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
deprecatedV19AdmissionControl = "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
defaultV19AdmissionControl = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
)
// CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane.
@ -68,7 +69,7 @@ func CreateSchedulerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.Ma
}
// GetStaticPodSpecs returns all staticPodSpecs actualized to the context of the current MasterConfiguration
// NB. this methods holds the information about how kubeadm creates static pod mainfests.
// NB. this methods holds the information about how kubeadm creates static pod manifests.
func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) map[string]v1.Pod {
// Get the required hostpath mounts
mounts := getHostPathVolumesForTheControlPlane(cfg)
@ -76,34 +77,49 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.
// Prepare static pod specs
staticPodSpecs := map[string]v1.Pod{
kubeadmconstants.KubeAPIServer: staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.KubeAPIServer,
Image: images.GetCoreImage(kubeadmconstants.KubeAPIServer, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
Command: getAPIServerCommand(cfg, k8sVersion),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeAPIServer, int(cfg.API.BindPort), "/healthz", v1.URISchemeHTTPS),
Resources: staticpodutil.ComponentResources("250m"),
Env: getProxyEnvVars(),
Name: kubeadmconstants.KubeAPIServer,
Image: images.GetCoreImage(kubeadmconstants.KubeAPIServer, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
ImagePullPolicy: cfg.ImagePullPolicy,
Command: getAPIServerCommand(cfg, k8sVersion),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeAPIServer, int(cfg.API.BindPort), "/healthz", v1.URISchemeHTTPS),
Resources: staticpodutil.ComponentResources("250m"),
Env: getProxyEnvVars(),
}, mounts.GetVolumes(kubeadmconstants.KubeAPIServer)),
kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.KubeControllerManager,
Image: images.GetCoreImage(kubeadmconstants.KubeControllerManager, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
Command: getControllerManagerCommand(cfg, k8sVersion),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeControllerManager, 10252, "/healthz", v1.URISchemeHTTP),
Resources: staticpodutil.ComponentResources("200m"),
Env: getProxyEnvVars(),
Name: kubeadmconstants.KubeControllerManager,
Image: images.GetCoreImage(kubeadmconstants.KubeControllerManager, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
ImagePullPolicy: cfg.ImagePullPolicy,
Command: getControllerManagerCommand(cfg, k8sVersion),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeControllerManager, 10252, "/healthz", v1.URISchemeHTTP),
Resources: staticpodutil.ComponentResources("200m"),
Env: getProxyEnvVars(),
}, mounts.GetVolumes(kubeadmconstants.KubeControllerManager)),
kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.KubeScheduler,
Image: images.GetCoreImage(kubeadmconstants.KubeScheduler, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
Command: getSchedulerCommand(cfg),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeScheduler, 10251, "/healthz", v1.URISchemeHTTP),
Resources: staticpodutil.ComponentResources("100m"),
Env: getProxyEnvVars(),
Name: kubeadmconstants.KubeScheduler,
Image: images.GetCoreImage(kubeadmconstants.KubeScheduler, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
ImagePullPolicy: cfg.ImagePullPolicy,
Command: getSchedulerCommand(cfg),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)),
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeScheduler, 10251, "/healthz", v1.URISchemeHTTP),
Resources: staticpodutil.ComponentResources("100m"),
Env: getProxyEnvVars(),
}, mounts.GetVolumes(kubeadmconstants.KubeScheduler)),
}
// Some cloud providers need extra privileges, for example to load node information from a config drive
// TODO: when we fully move to external cloud providers and the api server and controller manager do not need
// to call out to cloud provider code, we can remove the support for the PrivilegedPods
if cfg.PrivilegedPods {
staticPodSpecs[kubeadmconstants.KubeAPIServer].Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: utilpointer.BoolPtr(true),
}
staticPodSpecs[kubeadmconstants.KubeControllerManager].Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: utilpointer.BoolPtr(true),
}
}
return staticPodSpecs
}
@ -167,10 +183,6 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *versio
command := []string{"kube-apiserver"}
if k8sVersion.Minor() == 8 {
defaultArguments["admission-control"] = defaultV18AdmissionControl
}
if cfg.CloudProvider == "aws" || cfg.CloudProvider == "gce" {
defaultArguments["admission-control"] = deprecatedV19AdmissionControl
}
@ -178,21 +190,37 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *versio
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.APIServerExtraArgs)...)
command = append(command, getAuthzParameters(cfg.AuthorizationModes)...)
// Check if the user decided to use an external etcd cluster
// If the user set endpoints for an external etcd cluster
if len(cfg.Etcd.Endpoints) > 0 {
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
} else {
command = append(command, "--etcd-servers=http://127.0.0.1:2379")
}
// Is etcd secured?
if cfg.Etcd.CAFile != "" {
command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
}
if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
command = append(command, etcdClientFileArg, etcdKeyFileArg)
// Use any user supplied etcd certificates
if cfg.Etcd.CAFile != "" {
command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
}
if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
command = append(command, etcdClientFileArg, etcdKeyFileArg)
}
} else {
// Default to etcd static pod on localhost
etcdEndpointsArg := "--etcd-servers=https://127.0.0.1:2379"
etcdCAFileArg := fmt.Sprintf("--etcd-cafile=%s", filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName))
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientCertName))
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientKeyName))
command = append(command, etcdEndpointsArg, etcdCAFileArg, etcdClientFileArg, etcdKeyFileArg)
// Warn for unused user supplied variables
if cfg.Etcd.CAFile != "" {
fmt.Printf("[controlplane] WARNING: Configuration for %s CAFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.CAFile, kubeadmconstants.Etcd)
}
if cfg.Etcd.CertFile != "" {
fmt.Printf("[controlplane] WARNING: Configuration for %s CertFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.CertFile, kubeadmconstants.Etcd)
}
if cfg.Etcd.KeyFile != "" {
fmt.Printf("[controlplane] WARNING: Configuration for %s KeyFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.KeyFile, kubeadmconstants.Etcd)
}
}
if cfg.CloudProvider != "" {
@ -212,9 +240,66 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *versio
command = append(command, "--feature-gates=DynamicKubeletConfig=true")
}
if features.Enabled(cfg.FeatureGates, features.Auditing) {
command = append(command, "--audit-policy-file="+kubeadmconstants.GetStaticPodAuditPolicyFile())
command = append(command, "--audit-log-path="+filepath.Join(kubeadmconstants.StaticPodAuditPolicyLogDir, kubeadmconstants.AuditPolicyLogFile))
if cfg.AuditPolicyConfiguration.LogMaxAge == nil {
command = append(command, fmt.Sprintf("--audit-log-maxage=%d", kubeadmapiext.DefaultAuditPolicyLogMaxAge))
} else {
command = append(command, fmt.Sprintf("--audit-log-maxage=%d", *cfg.AuditPolicyConfiguration.LogMaxAge))
}
}
return command
}
// calcNodeCidrSize determines the size of the subnets used on each node, based
// on the pod subnet provided. For IPv4, we assume that the pod subnet will
// be /16 and use /24. If the pod subnet cannot be parsed, the IPv4 value will
// be used (/24).
//
// For IPv6, the algorithm will do three things. First, the node CIDR will be set
// to a multiple of 8, using the available bits for easier readability by user.
// Second, the number of nodes will be 512 to 64K to attempt to maximize the
// number of nodes (see NOTE below). Third, pod networks of /113 and larger will
// be rejected, as the amount of bits available is too small.
//
// A special case is when the pod network size is /112, where /120 will be used,
// only allowing 256 nodes and 256 pods.
//
// If the pod network size is /113 or larger, the node CIDR will be set to the same
// size and this will be rejected later in validation.
//
// NOTE: Currently, the pod network must be /66 or larger. It is not reflected here,
// but a smaller value will fail later validation.
//
// NOTE: Currently, the design allows a maximum of 64K nodes. This algorithm splits
// the available bits to maximize the number used for nodes, but still have the node
// CIDR be a multiple of eight.
//
func calcNodeCidrSize(podSubnet string) string {
maskSize := "24"
if ip, podCidr, err := net.ParseCIDR(podSubnet); err == nil {
if ip.To4() == nil {
var nodeCidrSize int
podNetSize, totalBits := podCidr.Mask.Size()
switch {
case podNetSize == 112:
// Special case, allows 256 nodes, 256 pods/node
nodeCidrSize = 120
case podNetSize < 112:
// Use multiple of 8 for node CIDR, with 512 to 64K nodes
nodeCidrSize = totalBits - ((totalBits-podNetSize-1)/8-1)*8
default:
// Not enough bits; this will fail later, during validation
nodeCidrSize = podNetSize
}
maskSize = strconv.Itoa(nodeCidrSize)
}
}
return maskSize
}
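To make the sizing rule concrete, here is a standalone sketch that reimplements the same arithmetic and prints the node CIDR chosen for a few pod subnets. It is an illustration only: the helper name nodeCidrSize and the sample IPv4 subnet are mine, and the code duplicates the logic of calcNodeCidrSize above rather than calling it (the function is unexported).

package main

import (
    "fmt"
    "net"
    "strconv"
)

// nodeCidrSize mirrors the sizing rule described above, for illustration only:
// IPv4 pod subnets always get /24 node CIDRs; IPv6 pod subnets get a node CIDR
// that is a multiple of 8 bits, sized so that 9-16 bits remain for nodes.
func nodeCidrSize(podSubnet string) string {
    maskSize := "24"
    ip, podCidr, err := net.ParseCIDR(podSubnet)
    if err != nil || ip.To4() != nil {
        return maskSize
    }
    podNetSize, totalBits := podCidr.Mask.Size()
    var size int
    switch {
    case podNetSize == 112:
        size = 120 // special case: 256 nodes with 256 pods each
    case podNetSize < 112:
        size = totalBits - ((totalBits-podNetSize-1)/8-1)*8
    default:
        size = podNetSize // too few bits; rejected later by validation
    }
    return strconv.Itoa(size)
}

func main() {
    for _, subnet := range []string{"10.244.0.0/16", "2001:db8::/64", "2001:db8::/104", "2001:db8::/112"} {
        fmt.Printf("%s -> node CIDR /%s\n", subnet, nodeCidrSize(subnet))
    }
    // Prints:
    // 10.244.0.0/16 -> node CIDR /24
    // 2001:db8::/64 -> node CIDR /80
    // 2001:db8::/104 -> node CIDR /120
    // 2001:db8::/112 -> node CIDR /120
}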
// getControllerManagerCommand builds the right controller manager command from the given config object and version
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) []string {
defaultArguments := map[string]string{
@ -251,12 +336,7 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion
// Let the controller-manager allocate Node CIDRs for the Pod network.
// Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
if cfg.Networking.PodSubnet != "" {
maskSize := "24"
if ip, _, err := net.ParseCIDR(cfg.Networking.PodSubnet); err == nil {
if ip.To4() == nil {
maskSize = "64"
}
}
maskSize := calcNodeCidrSize(cfg.Networking.PodSubnet)
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet,
"--node-cidr-mask-size="+maskSize)
}

View File

@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/util/version"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
)
const (
@ -43,7 +44,7 @@ func TestGetStaticPodSpecs(t *testing.T) {
// Creates a Master Configuration
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.8.0",
KubernetesVersion: "v1.9.0",
}
// Executes GetStaticPodSpecs
@ -74,7 +75,7 @@ func TestGetStaticPodSpecs(t *testing.T) {
// Assert each specs refers to the right pod
if spec.Spec.Containers[0].Name != assertion.staticPodName {
t.Errorf("getKubeConfigSpecs spec for %s contains pod %s, expectes %s", assertion.staticPodName, spec.Spec.Containers[0].Name, assertion.staticPodName)
t.Errorf("getKubeConfigSpecs spec for %s contains pod %s, expects %s", assertion.staticPodName, spec.Spec.Containers[0].Name, assertion.staticPodName)
}
} else {
@ -115,7 +116,7 @@ func TestCreateStaticPodFilesAndWrappers(t *testing.T) {
// Creates a Master Configuration
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.8.0",
KubernetesVersion: "v1.9.0",
}
// Execute createStaticPodFunction
@ -135,6 +136,58 @@ func TestCreateStaticPodFilesAndWrappers(t *testing.T) {
}
}
func TestCreatePrivilegedContainerForOpenStack(t *testing.T) {
// Creates a Master Configuration with OpenStack cloud provider
var staticPodNames = []string{
kubeadmconstants.KubeAPIServer,
kubeadmconstants.KubeControllerManager,
}
var assertions = []struct {
cloudProvider string
privilegedPods bool
expectedPrivilege bool
}{
{
cloudProvider: "",
expectedPrivilege: false,
},
{
cloudProvider: "aws",
expectedPrivilege: false,
},
{
cloudProvider: "openstack",
privilegedPods: true,
expectedPrivilege: true,
},
}
for _, assertion := range assertions {
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "v1.9.0",
CloudProvider: assertion.cloudProvider,
PrivilegedPods: assertion.privilegedPods,
}
k8sVersion, _ := version.ParseSemantic(cfg.KubernetesVersion)
specs := GetStaticPodSpecs(cfg, k8sVersion)
for _, podname := range staticPodNames {
spec, _ := specs[podname]
sc := spec.Spec.Containers[0].SecurityContext
if assertion.expectedPrivilege == true {
if sc == nil || sc.Privileged == nil || *sc.Privileged == false {
t.Errorf("GetStaticPodSpecs did not enable privileged containers in %s pod for provider %s", podname, assertion.cloudProvider)
}
} else {
if sc != nil && sc.Privileged != nil && *sc.Privileged == true {
t.Errorf("GetStaticPodSpecs enabled privileged containers in %s pod for provider %s", podname, assertion.cloudProvider)
}
}
}
}
}
func TestGetAPIServerCommand(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
@ -145,12 +198,12 @@ func TestGetAPIServerCommand(t *testing.T) {
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0",
KubernetesVersion: "v1.9.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -171,7 +224,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -179,12 +235,12 @@ func TestGetAPIServerCommand(t *testing.T) {
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0-beta.0",
KubernetesVersion: "v1.9.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -205,7 +261,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -213,12 +272,12 @@ func TestGetAPIServerCommand(t *testing.T) {
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "4.3.2.1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.1",
KubernetesVersion: "v1.9.1",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -239,7 +298,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=4.3.2.1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -248,12 +310,12 @@ func TestGetAPIServerCommand(t *testing.T) {
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0",
KubernetesVersion: "v1.9.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -274,9 +336,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=4.3.2.1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -285,12 +348,17 @@ func TestGetAPIServerCommand(t *testing.T) {
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.3",
KubernetesVersion: "v1.9.3",
AuditPolicyConfiguration: kubeadmapi.AuditPolicyConfiguration{
Path: "/foo/bar",
LogDir: "/foo/baz",
LogMaxAge: utilpointer.Int32Ptr(10),
}, // ignored without the feature gate
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -311,9 +379,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=4.3.2.1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -322,12 +391,12 @@ func TestGetAPIServerCommand(t *testing.T) {
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.8.0",
KubernetesVersion: "v1.9.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -348,9 +417,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -364,7 +434,7 @@ func TestGetAPIServerCommand(t *testing.T) {
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -385,9 +455,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
@ -395,13 +466,14 @@ func TestGetAPIServerCommand(t *testing.T) {
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
FeatureGates: map[string]bool{features.HighAvailability: true},
Etcd: kubeadmapi.Etcd{Endpoints: []string{"https://8.6.4.1:2379", "https://8.6.4.2:2379"}, CAFile: "fuz", CertFile: "fiz", KeyFile: "faz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -422,10 +494,131 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-servers=https://8.6.4.1:2379,https://8.6.4.2:2379",
"--etcd-cafile=fuz",
"--etcd-certfile=fiz",
"--etcd-keyfile=faz",
fmt.Sprintf("--endpoint-reconciler-type=%s", reconcilers.LeaseEndpointReconcilerType),
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:2380"}},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--enable-bootstrap-token-auth=true",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=http://127.0.0.1:2379,http://127.0.0.1:2380",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
Etcd: kubeadmapi.Etcd{CAFile: "fuz"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--enable-bootstrap-token-auth=true",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
FeatureGates: map[string]bool{features.HighAvailability: true, features.Auditing: true},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.9.0-beta.0",
AuditPolicyConfiguration: kubeadmapi.AuditPolicyConfiguration{
LogMaxAge: utilpointer.Int32Ptr(0),
},
},
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
"--tls-cert-file=" + testCertsDir + "/apiserver.crt",
"--tls-private-key-file=" + testCertsDir + "/apiserver.key",
"--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt",
"--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged=true",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--enable-bootstrap-token-auth=true",
"--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt",
"--proxy-client-key-file=/var/lib/certs/front-proxy-client.key",
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt",
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=2001:db8::1",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
fmt.Sprintf("--endpoint-reconciler-type=%s", reconcilers.LeaseEndpointReconcilerType),
"--audit-policy-file=/etc/kubernetes/audit/audit.yaml",
"--audit-log-path=/var/log/kubernetes/audit/audit.log",
"--audit-log-maxage=0",
},
},
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
@ -437,7 +630,7 @@ func TestGetAPIServerCommand(t *testing.T) {
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -458,7 +651,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
"--cloud-provider=gce",
},
},
@ -473,7 +669,7 @@ func TestGetAPIServerCommand(t *testing.T) {
expected: []string{
"kube-apiserver",
"--insecure-port=0",
"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + testCertsDir + "/sa.pub",
"--client-ca-file=" + testCertsDir + "/ca.crt",
@ -494,7 +690,10 @@ func TestGetAPIServerCommand(t *testing.T) {
"--requestheader-allowed-names=front-proxy-client",
"--authorization-mode=Node,RBAC",
"--advertise-address=1.2.3.4",
"--etcd-servers=http://127.0.0.1:2379",
"--etcd-servers=https://127.0.0.1:2379",
"--etcd-cafile=" + testCertsDir + "/ca.crt",
"--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt",
"--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key",
"--cloud-provider=aws",
},
},
@ -577,7 +776,7 @@ func TestGetControllerManagerCommand(t *testing.T) {
},
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadmapi.Networking{PodSubnet: "2001:101:115::/48"},
Networking: kubeadmapi.Networking{PodSubnet: "2001:db8::/64"},
CertificatesDir: testCertsDir,
KubernetesVersion: "v1.7.0",
},
@ -593,8 +792,8 @@ func TestGetControllerManagerCommand(t *testing.T) {
"--use-service-account-credentials=true",
"--controllers=*,bootstrapsigner,tokencleaner",
"--allocate-node-cidrs=true",
"--cluster-cidr=2001:101:115::/48",
"--node-cidr-mask-size=64",
"--cluster-cidr=2001:db8::/64",
"--node-cidr-mask-size=80",
},
},
}
@ -609,6 +808,77 @@ func TestGetControllerManagerCommand(t *testing.T) {
}
}
func TestCalcNodeCidrSize(t *testing.T) {
tests := []struct {
name string
podSubnet string
expectedPrefix string
}{
{
name: "Malformed pod subnet",
podSubnet: "10.10.10/160",
expectedPrefix: "24",
},
{
name: "V4: Always uses 24",
podSubnet: "10.10.10.10/16",
expectedPrefix: "24",
},
{
name: "V6: Use pod subnet size, when not enough space",
podSubnet: "2001:db8::/128",
expectedPrefix: "128",
},
{
name: "V6: Use pod subnet size, when not enough space",
podSubnet: "2001:db8::/113",
expectedPrefix: "113",
},
{
name: "V6: Special case with 256 nodes",
podSubnet: "2001:db8::/112",
expectedPrefix: "120",
},
{
name: "V6: Using /120 for node CIDR",
podSubnet: "2001:db8::/104",
expectedPrefix: "120",
},
{
name: "V6: Using /112 for node CIDR",
podSubnet: "2001:db8::/103",
expectedPrefix: "112",
},
{
name: "V6: Using /112 for node CIDR",
podSubnet: "2001:db8::/96",
expectedPrefix: "112",
},
{
name: "V6: Using /104 for node CIDR",
podSubnet: "2001:db8::/95",
expectedPrefix: "104",
},
{
name: "V6: Largest subnet currently supported",
podSubnet: "2001:db8::/66",
expectedPrefix: "80",
},
{
name: "V6: For /64 pod net, use /80",
podSubnet: "2001:db8::/64",
expectedPrefix: "80",
},
}
for _, test := range tests {
actualPrefix := calcNodeCidrSize(test.podSubnet)
if actualPrefix != test.expectedPrefix {
t.Errorf("Case [%s]\nCalc of node CIDR size for pod subnet %q failed: Expected %q, saw %q",
test.name, test.podSubnet, test.expectedPrefix, actualPrefix)
}
}
}
func TestGetControllerManagerCommandExternalCA(t *testing.T) {
tests := []struct {

View File

@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
)
@ -47,6 +48,7 @@ var caCertsPkiVolumePath = "/etc/pki"
func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) controlPlaneHostPathMounts {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
hostPathFileOrCreate := v1.HostPathFileOrCreate
hostPathFile := v1.HostPathFile
mounts := newControlPlaneHostPathMounts()
// HostPath volumes for the API Server
@ -55,7 +57,12 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeCertificatesVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true, &hostPathDirectoryOrCreate)
// Read-only mount for the ca certs (/etc/ssl/certs) directory
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, caCertsVolumeName, caCertsVolumePath, caCertsVolumePath, true, &hostPathDirectoryOrCreate)
if features.Enabled(cfg.FeatureGates, features.Auditing) {
// Read-only mount for the audit policy file.
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeAuditPolicyVolumeName, cfg.AuditPolicyConfiguration.Path, kubeadmconstants.GetStaticPodAuditPolicyFile(), true, &hostPathFile)
// Write mount for the audit logs.
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeAuditPolicyLogVolumeName, cfg.AuditPolicyConfiguration.LogDir, kubeadmconstants.StaticPodAuditPolicyLogDir, false, &hostPathDirectoryOrCreate)
}
// If external etcd is specified, mount the directories needed for accessing the CA/serving certs and the private key
if len(cfg.Etcd.Endpoints) != 0 {
etcdVols, etcdVolMounts := getEtcdCertVolumes(cfg.Etcd, cfg.CertificatesDir)

View File

@ -26,6 +26,7 @@ import (
"k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
)
func TestGetEtcdCertVolumes(t *testing.T) {
@ -258,6 +259,7 @@ func TestGetEtcdCertVolumes(t *testing.T) {
func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
hostPathFileOrCreate := v1.HostPathFileOrCreate
hostPathFile := v1.HostPathFile
volMap := make(map[string]map[string]v1.Volume)
volMap[kubeadmconstants.KubeAPIServer] = map[string]v1.Volume{}
volMap[kubeadmconstants.KubeAPIServer]["k8s-certs"] = v1.Volume{
@ -278,6 +280,24 @@ func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
},
},
}
volMap[kubeadmconstants.KubeAPIServer]["audit"] = v1.Volume{
Name: "audit",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/foo/bar/baz.yaml",
Type: &hostPathFile,
},
},
}
volMap[kubeadmconstants.KubeAPIServer]["audit-log"] = v1.Volume{
Name: "audit-log",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/bar/foo",
Type: &hostPathDirectoryOrCreate,
},
},
}
volMap[kubeadmconstants.KubeControllerManager] = map[string]v1.Volume{}
volMap[kubeadmconstants.KubeControllerManager]["k8s-certs"] = v1.Volume{
Name: "k8s-certs",
@ -328,6 +348,16 @@ func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
MountPath: "/etc/ssl/certs",
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeAPIServer]["audit"] = v1.VolumeMount{
Name: "audit",
MountPath: "/etc/kubernetes/audit/audit.yaml",
ReadOnly: true,
}
volMountMap[kubeadmconstants.KubeAPIServer]["audit-log"] = v1.VolumeMount{
Name: "audit-log",
MountPath: "/var/log/kubernetes/audit",
ReadOnly: false,
}
volMountMap[kubeadmconstants.KubeControllerManager] = map[string]v1.VolumeMount{}
volMountMap[kubeadmconstants.KubeControllerManager]["k8s-certs"] = v1.VolumeMount{
Name: "k8s-certs",
@ -481,6 +511,11 @@ func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
cfg: &kubeadmapi.MasterConfiguration{
CertificatesDir: testCertsDir,
Etcd: kubeadmapi.Etcd{},
FeatureGates: map[string]bool{features.Auditing: true},
AuditPolicyConfiguration: kubeadmapi.AuditPolicyConfiguration{
Path: "/foo/bar/baz.yaml",
LogDir: "/bar/foo",
},
},
vol: volMap,
volMount: volMountMap,

View File

@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["local_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",

View File

@ -18,6 +18,7 @@ package etcd
import (
"fmt"
"path/filepath"
"k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@ -28,7 +29,8 @@ import (
)
const (
etcdVolumeName = "etcd"
etcdVolumeName = "etcd-data"
certsVolumeName = "k8s-certs"
)
// CreateLocalEtcdStaticPodManifestFile will write local etcd static pod manifest file.
@ -46,18 +48,23 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.Ma
}
// GetEtcdPodSpec returns the etcd static Pod actualized to the context of the current MasterConfiguration
// NB. GetEtcdPodSpec methods holds the information about how kubeadm creates etcd static pod mainfests.
// NB. GetEtcdPodSpec methods holds the information about how kubeadm creates etcd static pod manifests.
func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod {
pathType := v1.HostPathDirectoryOrCreate
etcdMounts := map[string]v1.Volume{
etcdVolumeName: staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.DataDir, &pathType),
etcdVolumeName: staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.DataDir, &pathType),
certsVolumeName: staticpodutil.NewVolume(certsVolumeName, cfg.CertificatesDir, &pathType),
}
return staticpodutil.ComponentPod(v1.Container{
Name: kubeadmconstants.Etcd,
Command: getEtcdCommand(cfg),
Image: images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image),
Name: kubeadmconstants.Etcd,
Command: getEtcdCommand(cfg),
Image: images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image),
ImagePullPolicy: cfg.ImagePullPolicy,
// Mount the etcd datadir path read-write so etcd can store data in a more persistent manner
VolumeMounts: []v1.VolumeMount{staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.DataDir, false)},
VolumeMounts: []v1.VolumeMount{
staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.DataDir, false),
staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir, false),
},
LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.Etcd, 2379, "/health", v1.URISchemeHTTP),
}, etcdMounts)
}
@ -65,9 +72,17 @@ func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod {
// getEtcdCommand builds the right etcd command from the given config object
func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string {
defaultArguments := map[string]string{
"listen-client-urls": "http://127.0.0.1:2379",
"advertise-client-urls": "http://127.0.0.1:2379",
"listen-client-urls": "https://127.0.0.1:2379",
"advertise-client-urls": "https://127.0.0.1:2379",
"data-dir": cfg.Etcd.DataDir,
"cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerCertName),
"key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName),
"trusted-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
"client-cert-auth": "true",
"peer-cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdPeerCertName),
"peer-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdPeerKeyName),
"peer-trusted-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
"peer-client-cert-auth": "true",
}
command := []string{"etcd"}

View File

@ -41,7 +41,7 @@ func TestGetEtcdPodSpec(t *testing.T) {
// Assert each specs refers to the right pod
if spec.Spec.Containers[0].Name != kubeadmconstants.Etcd {
t.Errorf("getKubeConfigSpecs spec for etcd contains pod %s, expectes %s", spec.Spec.Containers[0].Name, kubeadmconstants.Etcd)
t.Errorf("getKubeConfigSpecs spec for etcd contains pod %s, expects %s", spec.Spec.Containers[0].Name, kubeadmconstants.Etcd)
}
}
@ -79,9 +79,17 @@ func TestGetEtcdCommand(t *testing.T) {
},
expected: []string{
"etcd",
"--listen-client-urls=http://127.0.0.1:2379",
"--advertise-client-urls=http://127.0.0.1:2379",
"--listen-client-urls=https://127.0.0.1:2379",
"--advertise-client-urls=https://127.0.0.1:2379",
"--data-dir=/var/lib/etcd",
"--cert-file=" + kubeadmconstants.EtcdServerCertName,
"--key-file=" + kubeadmconstants.EtcdServerKeyName,
"--trusted-ca-file=" + kubeadmconstants.CACertName,
"--client-cert-auth=true",
"--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
"--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
"--peer-trusted-ca-file=" + kubeadmconstants.CACertName,
"--peer-client-cert-auth=true",
},
},
{
@ -89,16 +97,24 @@ func TestGetEtcdCommand(t *testing.T) {
Etcd: kubeadmapi.Etcd{
DataDir: "/var/lib/etcd",
ExtraArgs: map[string]string{
"listen-client-urls": "http://10.0.1.10:2379",
"advertise-client-urls": "http://10.0.1.10:2379",
"listen-client-urls": "https://10.0.1.10:2379",
"advertise-client-urls": "https://10.0.1.10:2379",
},
},
},
expected: []string{
"etcd",
"--listen-client-urls=http://10.0.1.10:2379",
"--advertise-client-urls=http://10.0.1.10:2379",
"--listen-client-urls=https://10.0.1.10:2379",
"--advertise-client-urls=https://10.0.1.10:2379",
"--data-dir=/var/lib/etcd",
"--cert-file=" + kubeadmconstants.EtcdServerCertName,
"--key-file=" + kubeadmconstants.EtcdServerKeyName,
"--trusted-ca-file=" + kubeadmconstants.CACertName,
"--client-cert-auth=true",
"--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
"--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
"--peer-trusted-ca-file=" + kubeadmconstants.CACertName,
"--peer-client-cert-auth=true",
},
},
{
@ -107,9 +123,17 @@ func TestGetEtcdCommand(t *testing.T) {
},
expected: []string{
"etcd",
"--listen-client-urls=http://127.0.0.1:2379",
"--advertise-client-urls=http://127.0.0.1:2379",
"--listen-client-urls=https://127.0.0.1:2379",
"--advertise-client-urls=https://127.0.0.1:2379",
"--data-dir=/etc/foo",
"--cert-file=" + kubeadmconstants.EtcdServerCertName,
"--key-file=" + kubeadmconstants.EtcdServerKeyName,
"--trusted-ca-file=" + kubeadmconstants.CACertName,
"--client-cert-auth=true",
"--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
"--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
"--peer-trusted-ca-file=" + kubeadmconstants.CACertName,
"--peer-client-cert-auth=true",
},
},
}

View File

@ -68,7 +68,7 @@ func AddKnownTypes(s *runtime.Scheme) error {
type EtcdClusterList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
Items []EtcdCluster `json:"items"`
}

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package spec
@ -91,9 +91,8 @@ func (in *EtcdCluster) DeepCopy() *EtcdCluster {
func (in *EtcdCluster) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -125,9 +124,8 @@ func (in *EtcdClusterList) DeepCopy() *EtcdClusterList {
func (in *EtcdClusterList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -41,8 +41,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["kubeconfig_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",

View File

@ -231,7 +231,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda
return fmt.Errorf("failed to save kubeconfig file %s on disk: %v", kubeConfigFilePath, err)
}
fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filename)
fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", kubeConfigFilePath)
return nil
}
@ -258,7 +258,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda
// kubeadm doesn't validate the existing kubeconfig file more than this (kubeadm trusts the client certs to be valid)
// Basically, if we find a kubeconfig file with the same path; the same CA cert and the same server URL;
// kubeadm thinks those files are equal and doesn't bother writing a new file
fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", filename)
fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", kubeConfigFilePath)
return nil
}

View File

@ -71,12 +71,25 @@ func TestGetKubeConfigSpecs(t *testing.T) {
NodeName: "valid-node-name",
}
// Creates a Master Configuration pointing to the pkidir folder
cfgDNS := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234},
CertificatesDir: pkidir,
NodeName: "valid-node-name",
}
// Executes getKubeConfigSpecs
specs, err := getKubeConfigSpecs(cfg)
if err != nil {
t.Fatal("getKubeConfigSpecs failed!")
}
// Executes getKubeConfigSpecs against the ControlPlaneEndpoint-based configuration
specsDNS, err := getKubeConfigSpecs(cfgDNS)
if err != nil {
t.Fatal("getKubeConfigSpecs failed!")
}
var assertions = []struct {
kubeConfigFile string
clientName string
@ -136,6 +149,39 @@ func TestGetKubeConfigSpecs(t *testing.T) {
} else {
t.Errorf("getKubeConfigSpecs didn't create spec for %s ", assertion.kubeConfigFile)
}
// assert the spec for the kubeConfigFile exists
if spec, ok := specsDNS[assertion.kubeConfigFile]; ok {
// Assert clientName
if spec.ClientName != assertion.clientName {
t.Errorf("getKubeConfigSpecs for %s clientName is %s, expected %s", assertion.kubeConfigFile, spec.ClientName, assertion.clientName)
}
// Assert Organizations
if spec.ClientCertAuth == nil || !reflect.DeepEqual(spec.ClientCertAuth.Organizations, assertion.organizations) {
t.Errorf("getKubeConfigSpecs for %s Organizations is %v, expected %v", assertion.kubeConfigFile, spec.ClientCertAuth.Organizations, assertion.organizations)
}
// Asserts MasterConfiguration values injected into spec
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfgDNS)
if err != nil {
t.Error(err)
}
if spec.APIServer != masterEndpoint {
t.Errorf("getKubeConfigSpecs didn't injected cfg.APIServer endpoint into spec for %s", assertion.kubeConfigFile)
}
// Asserts CA certs and CA keys loaded into specs
if spec.CACert == nil {
t.Errorf("getKubeConfigSpecs didn't loaded CACert into spec for %s!", assertion.kubeConfigFile)
}
if spec.ClientCertAuth == nil || spec.ClientCertAuth.CAKey == nil {
t.Errorf("getKubeConfigSpecs didn't loaded CAKey into spec for %s!", assertion.kubeConfigFile)
}
} else {
t.Errorf("getKubeConfigSpecs didn't create spec for %s ", assertion.kubeConfigFile)
}
}
}
@ -143,7 +189,7 @@ func TestBuildKubeConfigFromSpecWithClientAuth(t *testing.T) {
// Creates a CA
caCert, caKey := certstestutil.SetupCertificateAuthorithy(t)
// Executes buildKubeConfigFromSpec passing a KubeConfigSpec wiht a ClientAuth
// Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a ClientAuth
config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myClientName", "myOrg1", "myOrg2")
// Asserts spec data are propagated to the kubeconfig
@ -155,7 +201,7 @@ func TestBuildKubeConfigFromSpecWithTokenAuth(t *testing.T) {
// Creates a CA
caCert, _ := certstestutil.SetupCertificateAuthorithy(t)
// Executes buildKubeConfigFromSpec passing a KubeConfigSpec wiht a Token
// Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a Token
config := setupdKubeConfigWithTokenAuth(t, caCert, "https://1.2.3.4:1234", "myClientName", "123456")
// Asserts spec data are propagated to the kubeconfig
@ -219,7 +265,7 @@ func TestCreateKubeConfigFileIfNotExists(t *testing.T) {
t.Errorf("createKubeConfigFileIfNotExists failed")
}
// Assert creted files is there
// Assert that the created file is there
testutil.AssertFileExists(t, tmpdir, "test.conf")
}
}
@ -338,10 +384,10 @@ func TestWriteKubeConfig(t *testing.T) {
// Adds a pki folder with a ca cert to the temp folder
pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)
// Retrives ca cert for assertions
// Retrieves ca cert for assertions
caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName)
if err != nil {
t.Fatalf("couldn't retrive ca cert: %v", err)
t.Fatalf("couldn't retrieve ca cert: %v", err)
}
// Creates a Master Configuration pointing to the pkidir folder

View File

@ -13,7 +13,7 @@ go_library(
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@ -28,12 +28,11 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["kubelet_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@ -38,7 +38,7 @@ import (
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
)
// CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature.
@ -50,7 +50,7 @@ func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client
if err != nil {
return err
}
kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1alpha1.SchemeGroupVersion, *kubeletCodecs)
kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs)
if err != nil {
return err
}
@ -210,7 +210,7 @@ func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) e
return err
}
kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1alpha1.SchemeGroupVersion, *kubeletCodecs)
kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs)
if err != nil {
return err
}

View File

@ -26,7 +26,7 @@ import (
core "k8s.io/client-go/testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
)
func TestCreateBaseKubeletConfiguration(t *testing.T) {
@ -35,7 +35,7 @@ func TestCreateBaseKubeletConfiguration(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
NodeName: nodeName,
KubeletConfiguration: kubeadmapi.KubeletConfiguration{
BaseConfig: &kubeletconfigv1alpha1.KubeletConfiguration{
BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeletConfiguration",
},

View File

@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["markmaster_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/kubelet/apis:go_default_library",

View File

@ -32,9 +32,13 @@ import (
)
// MarkMaster taints the master and sets the master label
func MarkMaster(client clientset.Interface, masterName string) error {
func MarkMaster(client clientset.Interface, masterName string, taint bool) error {
fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
if taint {
fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
} else {
fmt.Printf("[markmaster] Will mark node %s as master by adding a label\n", masterName)
}
// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.MarkMasterTimeout, func() (bool, error) {
@ -56,7 +60,7 @@ func MarkMaster(client clientset.Interface, masterName string) error {
}
// The master node should be tainted and labelled accordingly
markMasterNode(n)
markMasterNode(n, taint)
newData, err := json.Marshal(n)
if err != nil {
@ -76,15 +80,23 @@ func MarkMaster(client clientset.Interface, masterName string) error {
return false, err
}
fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
if taint {
fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
} else {
fmt.Printf("[markmaster] Master %s labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
}
return true, nil
})
}
func markMasterNode(n *v1.Node) {
func markMasterNode(n *v1.Node, taint bool) {
n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
if taint {
addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
} else {
delTaintIfExists(n, kubeadmconstants.MasterTaint)
}
}
func addTaintIfNotExists(n *v1.Node, t v1.Taint) {
@ -96,3 +108,14 @@ func addTaintIfNotExists(n *v1.Node, t v1.Taint) {
n.Spec.Taints = append(n.Spec.Taints, t)
}
func delTaintIfExists(n *v1.Node, t v1.Taint) {
var taints []v1.Taint
for _, taint := range n.Spec.Taints {
if taint == t {
continue
}
taints = append(taints, taint)
}
n.Spec.Taints = taints
}
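
A minimal standalone sketch of the taint-removal logic that delTaintIfExists implements: rebuild the taint slice while skipping the taint to drop. Illustrative only, not part of this diff; the helper and variable names are assumptions.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// delTaint returns a copy of taints with every occurrence of t removed.
func delTaint(taints []v1.Taint, t v1.Taint) []v1.Taint {
	var kept []v1.Taint
	for _, taint := range taints {
		if taint == t {
			continue // skip the taint we want to drop
		}
		kept = append(kept, taint)
	}
	return kept
}

func main() {
	master := v1.Taint{Key: "node-role.kubernetes.io/master", Effect: v1.TaintEffectNoSchedule}
	node := &v1.Node{Spec: v1.NodeSpec{Taints: []v1.Taint{master}}}
	node.Spec.Taints = delTaint(node.Spec.Taints, master)
	fmt.Println(len(node.Spec.Taints)) // 0: the master taint has been removed
}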

View File

@ -43,32 +43,51 @@ func TestMarkMaster(t *testing.T) {
name string
existingLabel string
existingTaint *v1.Taint
wantTaint bool
expectedPatch string
}{
{
"master label and taint missing",
"",
nil,
true,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
},
{
"master label and taint missing but taint not wanted",
"",
nil,
false,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
},
{
"master label missing",
"",
&kubeadmconstants.MasterTaint,
true,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
},
{
"master taint missing",
kubeadmconstants.LabelNodeRoleMaster,
nil,
true,
"{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
},
{
"nothing missing",
kubeadmconstants.LabelNodeRoleMaster,
&kubeadmconstants.MasterTaint,
true,
"{}",
},
{
"nothing missing but taint unwanted",
kubeadmconstants.LabelNodeRoleMaster,
&kubeadmconstants.MasterTaint,
false,
"{\"spec\":{\"taints\":null}}",
},
}
for _, tc := range tests {
@ -125,7 +144,7 @@ func TestMarkMaster(t *testing.T) {
t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err)
}
err = MarkMaster(cs, hostname)
err = MarkMaster(cs, hostname, tc.wantTaint)
if err != nil {
t.Errorf("MarkMaster(%s) returned unexpected error: %v", tc.name, err)
}

View File

@ -13,13 +13,12 @@ go_test(
"selfhosting_test.go",
"selfhosting_volumes_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
@ -39,7 +38,7 @@ go_library(
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",

View File

@ -18,6 +18,7 @@ package selfhosting
import (
"path/filepath"
"strings"
"k8s.io/api/core/v1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -35,13 +36,14 @@ const (
// PodSpecMutatorFunc is a function capable of mutating a PodSpec
type PodSpecMutatorFunc func(*v1.PodSpec)
// GetDefaultMutators gets the mutator functions that alwasy should be used
// GetDefaultMutators gets the mutator functions that always should be used
func GetDefaultMutators() map[string][]PodSpecMutatorFunc {
return map[string][]PodSpecMutatorFunc{
kubeadmconstants.KubeAPIServer: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
setHostIPOnPodSpec,
},
kubeadmconstants.KubeControllerManager: {
addNodeSelectorToPodSpec,
@ -101,6 +103,26 @@ func setMasterTolerationOnPodSpec(podSpec *v1.PodSpec) {
podSpec.Tolerations = append(podSpec.Tolerations, kubeadmconstants.MasterToleration)
}
// setHostIPOnPodSpec sets the environment variable HOST_IP using downward API
func setHostIPOnPodSpec(podSpec *v1.PodSpec) {
envVar := v1.EnvVar{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.hostIP",
},
},
}
podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, envVar)
for i := range podSpec.Containers[0].Command {
if strings.Contains(podSpec.Containers[0].Command[i], "advertise-address") {
podSpec.Containers[0].Command[i] = "--advertise-address=$(HOST_IP)"
}
}
}
// setRightDNSPolicyOnPodSpec makes sure the self-hosted components can look up things via kube-dns if necessary
func setRightDNSPolicyOnPodSpec(podSpec *v1.PodSpec) {
podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet
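
For reference, a self-contained sketch of the HOST_IP mutation added above: inject the node IP through the downward API and rewrite the --advertise-address flag to reference it. Illustrative only, not part of this diff; the function and variable names are assumptions.

package main

import (
	"fmt"
	"strings"

	"k8s.io/api/core/v1"
)

// setHostIP adds a HOST_IP env var sourced from status.hostIP and points the
// advertise-address flag at it, so the value is resolved on the node at runtime.
func setHostIP(podSpec *v1.PodSpec) {
	hostIP := v1.EnvVar{
		Name: "HOST_IP",
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.hostIP"},
		},
	}
	podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, hostIP)
	for i, arg := range podSpec.Containers[0].Command {
		if strings.Contains(arg, "advertise-address") {
			podSpec.Containers[0].Command[i] = "--advertise-address=$(HOST_IP)"
		}
	}
}

func main() {
	spec := &v1.PodSpec{Containers: []v1.Container{{
		Name:    "kube-apiserver",
		Command: []string{"--advertise-address=10.0.0.1"},
	}}}
	setHostIP(spec)
	fmt.Println(spec.Containers[0].Command[0]) // --advertise-address=$(HOST_IP)
}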

View File

@ -33,8 +33,36 @@ func TestMutatePodSpec(t *testing.T) {
}{
{
component: kubeadmconstants.KubeAPIServer,
podSpec: &v1.PodSpec{},
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
Name: "kube-apiserver",
Command: []string{
"--advertise-address=10.0.0.1",
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
Name: "kube-apiserver",
Command: []string{
"--advertise-address=$(HOST_IP)",
},
Env: []v1.EnvVar{
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.hostIP",
},
},
},
},
},
},
NodeSelector: map[string]string{
kubeadmconstants.LabelNodeRoleMaster: "",
},
@ -185,6 +213,55 @@ func TestSetRightDNSPolicyOnPodSpec(t *testing.T) {
}
}
func TestSetHostIPOnPodSpec(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
Name: "kube-apiserver",
Command: []string{
"--advertise-address=10.0.0.1",
},
Env: []v1.EnvVar{},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
Name: "kube-apiserver",
Command: []string{
"--advertise-address=$(HOST_IP)",
},
Env: []v1.EnvVar{
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.hostIP",
},
},
},
},
},
},
},
},
}
for _, rt := range tests {
setHostIPOnPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setHostIPOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForAPIServer(t *testing.T) {
hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
var tests = []struct {

View File

@ -21,7 +21,7 @@ import (
"os"
"time"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -112,7 +112,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
}
// Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// remove the Static Pod (or the mirror Pod respectively). This implicitely also tests that the API server endpoint is healthy,
// remove the Static Pod (or the mirror Pod respectively). This implicitly also tests that the API server endpoint is healthy,
// because this blocks until the API server returns a 404 Not Found when getting the Static Pod
staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeName)
if err := waiter.WaitForPodToDisappear(staticPodName); err != nil {
@ -129,7 +129,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
return nil
}
// BuildDaemonSet is responsible for mutating the PodSpec and return a DaemonSet which is suitable for the self-hosting purporse
// BuildDaemonSet is responsible for mutating the PodSpec and returns a DaemonSet which is suitable for self-hosting
func BuildDaemonSet(name string, podSpec *v1.PodSpec, mutators map[string][]PodSpecMutatorFunc) *apps.DaemonSet {
// Mutate the PodSpec so it's suitable for self-hosting
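
Since the self-hosting DaemonSets now target apps/v1, here is a minimal sketch of such an object; apps/v1 requires an explicit label selector that matches the pod template labels. The label key and component name below are illustrative assumptions, not kubeadm's actual values.

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildDaemonSet wraps a PodSpec into an apps/v1 DaemonSet with a selector
// matching the template labels, as apps/v1 requires.
func buildDaemonSet(name string, podSpec v1.PodSpec) *apps.DaemonSet {
	labels := map[string]string{"k8s-app": name}
	return &apps.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: metav1.NamespaceSystem, Labels: labels},
		Spec: apps.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: labels}, // required in apps/v1
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec:       podSpec,
			},
		},
	}
}

func main() {
	ds := buildDaemonSet("self-hosted-kube-apiserver", v1.PodSpec{})
	fmt.Println(ds.Name, ds.Spec.Selector.MatchLabels)
}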

View File

@ -23,7 +23,7 @@ import (
"os"
"testing"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@ -66,7 +66,7 @@ spec:
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --authorization-mode=Node,RBAC
- --etcd-servers=http://127.0.0.1:2379
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
image: k8s.gcr.io/kube-apiserver-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -104,7 +104,7 @@ spec:
status: {}
`
testAPIServerDaemonSet = `apiVersion: apps/v1beta2
testAPIServerDaemonSet = `apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
@ -134,7 +134,7 @@ spec:
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --advertise-address=192.168.1.115
- --advertise-address=$(HOST_IP)
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --insecure-port=0
- --experimental-bootstrap-token-auth=true
@ -148,7 +148,12 @@ spec:
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --authorization-mode=Node,RBAC
- --etcd-servers=http://127.0.0.1:2379
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
image: k8s.gcr.io/kube-apiserver-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -220,7 +225,7 @@ spec:
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --address=127.0.0.1
- --use-service-account-credentials=true
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
image: k8s.gcr.io/kube-controller-manager-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -265,7 +270,7 @@ spec:
status: {}
`
testControllerManagerDaemonSet = `apiVersion: apps/v1beta2
testControllerManagerDaemonSet = `apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
@ -295,7 +300,7 @@ spec:
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --address=127.0.0.1
- --use-service-account-credentials=true
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
image: k8s.gcr.io/kube-controller-manager-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -368,7 +373,7 @@ spec:
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/scheduler.conf
- --address=127.0.0.1
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
image: k8s.gcr.io/kube-scheduler-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -395,7 +400,7 @@ spec:
status: {}
`
testSchedulerDaemonSet = `apiVersion: apps/v1beta2
testSchedulerDaemonSet = `apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
@ -419,7 +424,7 @@ spec:
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/scheduler.conf
- --address=127.0.0.1
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
image: k8s.gcr.io/kube-scheduler-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -521,7 +526,7 @@ metadata:
name: testpod
spec:
containers:
- image: gcr.io/google_containers/busybox
- image: k8s.gcr.io/busybox
`,
expectError: false,
},
@ -537,7 +542,7 @@ spec:
"spec": {
"containers": [
{
"image": "gcr.io/google_containers/busybox"
"image": "k8s.gcr.io/busybox"
}
]
}
@ -552,7 +557,7 @@ kind: Pod
metadata:
name: testpod
spec:
- image: gcr.io/google_containers/busybox
- image: k8s.gcr.io/busybox
`,
expectError: true,
},

View File

@ -1,14 +0,0 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -40,7 +40,7 @@ go_library(
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/version:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -75,8 +75,7 @@ go_test(
"prepull_test.go",
"staticpods_test.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",

View File

@ -47,7 +47,7 @@ func (f *fakeVersionGetter) VersionFromCILabel(ciVersionLabel, _ string) (string
if ciVersionLabel == "latest" {
return f.latestVersion, versionutil.MustParseSemantic(f.latestVersion), nil
}
if ciVersionLabel == "latest-1.9" {
if ciVersionLabel == "latest-1.10" {
return f.latestDevBranchVersion, versionutil.MustParseSemantic(f.latestDevBranchVersion), nil
}
return f.stablePatchVersion, versionutil.MustParseSemantic(f.stablePatchVersion), nil
@ -64,7 +64,7 @@ type fakeEtcdCluster struct{}
func (f fakeEtcdCluster) GetEtcdClusterStatus() (*clientv3.StatusResponse, error) {
client := &clientv3.StatusResponse{}
client.Version = "3.0.14"
client.Version = "3.1.11"
return client, nil
}
@ -78,12 +78,12 @@ func TestGetAvailableUpgrades(t *testing.T) {
}{
{ // no action needed, already up-to-date
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
stablePatchVersion: "v1.8.3",
stableVersion: "v1.8.3",
stablePatchVersion: "v1.9.3",
stableVersion: "v1.9.3",
},
expectedUpgrades: []Upgrade{},
allowExperimental: false,
@ -91,30 +91,30 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // simple patch version upgrade
vg: &fakeVersionGetter{
clusterVersion: "v1.8.1",
kubeletVersion: "v1.8.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.8.2",
clusterVersion: "v1.9.1",
kubeletVersion: "v1.9.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.9.2",
stablePatchVersion: "v1.8.3",
stableVersion: "v1.8.3",
stablePatchVersion: "v1.9.3",
stableVersion: "v1.9.3",
},
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.8 series",
Description: "version in the v1.9 series",
Before: ClusterState{
KubeVersion: "v1.8.1",
KubeVersion: "v1.9.1",
KubeletVersions: map[string]uint16{
"v1.8.1": 1,
"v1.9.1": 1,
},
KubeadmVersion: "v1.8.2",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.2",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.8.3",
KubeadmVersion: "v1.8.3",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeVersion: "v1.9.3",
KubeadmVersion: "v1.9.3",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
},
},
@ -123,30 +123,30 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // minor version upgrade only
vg: &fakeVersionGetter{
clusterVersion: "v1.8.1",
kubeletVersion: "v1.8.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.9.0",
clusterVersion: "v1.9.1",
kubeletVersion: "v1.9.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.10.0",
stablePatchVersion: "v1.8.1",
stableVersion: "v1.9.0",
stablePatchVersion: "v1.9.1",
stableVersion: "v1.10.0",
},
expectedUpgrades: []Upgrade{
{
Description: "stable version",
Before: ClusterState{
KubeVersion: "v1.8.1",
KubeVersion: "v1.9.1",
KubeletVersions: map[string]uint16{
"v1.8.1": 1,
"v1.9.1": 1,
},
KubeadmVersion: "v1.9.0",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.10.0",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.0",
KubeadmVersion: "v1.9.0",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.0",
KubeadmVersion: "v1.10.0",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -155,48 +155,48 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // both minor version upgrade and patch version upgrade available
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.9.1",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.10.1",
},
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.8 series",
Description: "version in the v1.9 series",
Before: ClusterState{
KubeVersion: "v1.8.3",
KubeVersion: "v1.9.3",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
"v1.9.3": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.8.5",
KubeadmVersion: "v1.8.5", // Note: The kubeadm version mustn't be "downgraded" here
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeVersion: "v1.9.5",
KubeadmVersion: "v1.9.5", // Note: The kubeadm version mustn't be "downgraded" here
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
},
{
Description: "stable version",
Before: ClusterState{
KubeVersion: "v1.8.3",
KubeVersion: "v1.9.3",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
"v1.9.3": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.1",
KubeadmVersion: "v1.9.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.1",
KubeadmVersion: "v1.10.1",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -205,13 +205,13 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // allow experimental upgrades, but no upgrade available
vg: &fakeVersionGetter{
clusterVersion: "v1.9.0-alpha.2",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.10.0-alpha.2",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestVersion: "v1.9.0-alpha.2",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestVersion: "v1.10.0-alpha.2",
},
expectedUpgrades: []Upgrade{},
allowExperimental: true,
@ -219,31 +219,31 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // upgrade to an unstable version should be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.5",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestVersion: "v1.9.0-alpha.2",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestVersion: "v1.10.0-alpha.2",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.9.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.0-alpha.2",
KubeadmVersion: "v1.9.0-alpha.2",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.0-alpha.2",
KubeadmVersion: "v1.10.0-alpha.2",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -252,31 +252,31 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // upgrade from an unstable version to an unstable version should be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.9.0-alpha.1",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.10.0-alpha.1",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestVersion: "v1.9.0-alpha.2",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestVersion: "v1.10.0-alpha.2",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.9.0-alpha.1",
KubeVersion: "v1.10.0-alpha.1",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.7",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.0-alpha.2",
KubeadmVersion: "v1.9.0-alpha.2",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.0-alpha.2",
KubeadmVersion: "v1.10.0-alpha.2",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -285,32 +285,32 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // v1.X.0-alpha.0 should be ignored
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.5",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.0-beta.1",
latestVersion: "v1.10.0-alpha.0",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestDevBranchVersion: "v1.10.0-beta.1",
latestVersion: "v1.11.0-alpha.0",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.9.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.0-beta.1",
KubeadmVersion: "v1.9.0-beta.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.0-beta.1",
KubeadmVersion: "v1.10.0-beta.1",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -319,32 +319,32 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // upgrade to an RC version should be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.5",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.0-rc.1",
latestVersion: "v1.10.0-alpha.1",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestDevBranchVersion: "v1.10.0-rc.1",
latestVersion: "v1.11.0-alpha.1",
},
expectedUpgrades: []Upgrade{
{
Description: "release candidate version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.9.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.0-rc.1",
KubeadmVersion: "v1.9.0-rc.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.0-rc.1",
KubeadmVersion: "v1.10.0-rc.1",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -353,32 +353,32 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // it is possible (but very uncommon) that the latest version from the previous branch is an rc and the current latest version is alpha.0. In that case, show the RC
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.5",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.6-rc.1",
latestVersion: "v1.10.1-alpha.0",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestDevBranchVersion: "v1.10.6-rc.1",
latestVersion: "v1.11.1-alpha.0",
},
expectedUpgrades: []Upgrade{
{
Description: "experimental version", // Note that this is considered an experimental version in this uncommon scenario
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.9.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.6-rc.1",
KubeadmVersion: "v1.9.6-rc.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.6-rc.1",
KubeadmVersion: "v1.10.6-rc.1",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -387,50 +387,50 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
{ // upgrade to an RC version should be supported. There may also be an even newer unstable version.
vg: &fakeVersionGetter{
clusterVersion: "v1.8.5",
kubeletVersion: "v1.8.5",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.5",
kubeletVersion: "v1.9.5",
kubeadmVersion: "v1.9.5",
stablePatchVersion: "v1.8.5",
stableVersion: "v1.8.5",
latestDevBranchVersion: "v1.9.0-rc.1",
latestVersion: "v1.10.0-alpha.2",
stablePatchVersion: "v1.9.5",
stableVersion: "v1.9.5",
latestDevBranchVersion: "v1.10.0-rc.1",
latestVersion: "v1.11.0-alpha.2",
},
expectedUpgrades: []Upgrade{
{
Description: "release candidate version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.9.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.9.0-rc.1",
KubeadmVersion: "v1.9.0-rc.1",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.10.0-rc.1",
KubeadmVersion: "v1.10.0-rc.1",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.9.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.9.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSVersion: "1.14.5",
EtcdVersion: "3.0.14",
KubeadmVersion: "v1.9.5",
DNSVersion: "1.14.8",
EtcdVersion: "3.1.11",
},
After: ClusterState{
KubeVersion: "v1.10.0-alpha.2",
KubeadmVersion: "v1.10.0-alpha.2",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.10",
KubeVersion: "v1.11.0-alpha.2",
KubeadmVersion: "v1.11.0-alpha.2",
DNSVersion: "1.14.8",
EtcdVersion: "3.2.14",
},
},
},
@ -444,7 +444,6 @@ func TestGetAvailableUpgrades(t *testing.T) {
// kubernetes release.
testCluster := fakeEtcdCluster{}
for _, rt := range tests {
actualUpgrades, actualErr := GetAvailableUpgrades(rt.vg, rt.allowExperimental, rt.allowRCs, testCluster, featureGates)
if !reflect.DeepEqual(actualUpgrades, rt.expectedUpgrades) {
t.Errorf("failed TestGetAvailableUpgrades\n\texpected upgrades: %v\n\tgot: %v", rt.expectedUpgrades, actualUpgrades)
@ -463,36 +462,36 @@ func TestKubeletUpgrade(t *testing.T) {
}{
{ // upgrade available
before: map[string]uint16{
"v1.7.1": 1,
"v1.9.1": 1,
},
after: "v1.7.3",
after: "v1.9.3",
expected: true,
},
{ // upgrade available
before: map[string]uint16{
"v1.7.1": 1,
"v1.7.3": 100,
"v1.9.1": 1,
"v1.9.3": 100,
},
after: "v1.7.3",
after: "v1.9.3",
expected: true,
},
{ // upgrade not available
before: map[string]uint16{
"v1.7.3": 1,
"v1.9.3": 1,
},
after: "v1.7.3",
after: "v1.9.3",
expected: false,
},
{ // upgrade not available
before: map[string]uint16{
"v1.7.3": 100,
"v1.9.3": 100,
},
after: "v1.7.3",
after: "v1.9.3",
expected: false,
},
{ // upgrade not available if we don't know anything about the earlier state
before: map[string]uint16{},
after: "v1.7.3",
after: "v1.9.3",
expected: false,
},
}

View File

@ -52,6 +52,23 @@ func FetchConfiguration(client clientset.Interface, w io.Writer, cfgPath string)
return versionedcfg, nil
}
// FetchConfigurationFromFile fetch configuration from a file
func FetchConfigurationFromFile(cfgPath string) (*kubeadmapiext.MasterConfiguration, error) {
// Load the configuration from a file or the cluster
configBytes, err := ioutil.ReadFile(cfgPath)
if err != nil {
return nil, err
}
// Take the versioned configuration populated from the configmap, default it and validate
// Return the internal version of the API object
versionedcfg, err := bytesToValidatedMasterConfig(configBytes)
if err != nil {
return nil, fmt.Errorf("could not decode configuration: %v", err)
}
return versionedcfg, nil
}
// loadConfigurationBytes loads the configuration byte slice from either a file or the cluster ConfigMap
func loadConfigurationBytes(client clientset.Interface, w io.Writer, cfgPath string) ([]byte, error) {
if cfgPath != "" {

View File

@ -21,7 +21,7 @@ import (
"net/http"
"os"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -174,7 +174,7 @@ func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) {
notReadyDaemonSets := []error{}
for _, component := range constants.MasterComponents {
dsName := constants.AddSelfHostedPrefix(component)
ds, err := client.AppsV1beta2().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("couldn't get daemonset %q in the %s namespace", dsName, metav1.NamespaceSystem)
}
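
A sketch of a readiness check against the apps/v1 client, using a fake clientset so it runs standalone. The helper name and readiness criterion are assumptions; the client-go call signatures match the version vendored at the time of this commit.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// daemonSetReady reports whether the named DaemonSet in kube-system has all
// desired pods available.
func daemonSetReady(client clientset.Interface, name string) (bool, error) {
	ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return ds.Status.DesiredNumberScheduled > 0 && ds.Status.NumberUnavailable == 0, nil
}

func main() {
	client := fake.NewSimpleClientset() // empty cluster: the Get below returns NotFound
	ready, err := daemonSetReady(client, "self-hosted-kube-apiserver")
	fmt.Println(ready, err) // false, NotFound error
}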

View File

@ -32,135 +32,135 @@ func TestEnforceVersionPolicies(t *testing.T) {
}{
{ // everything ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.5",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.5",
},
newK8sVersion: "v1.8.5",
newK8sVersion: "v1.9.5",
},
{ // everything ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.2",
kubeadmVersion: "v1.9.1",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.2",
kubeadmVersion: "v1.10.1",
},
newK8sVersion: "v1.9.0",
newK8sVersion: "v1.10.0",
},
{ // downgrades ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
},
newK8sVersion: "v1.8.2",
newK8sVersion: "v1.9.2",
},
{ // upgrades without bumping the version number ok
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
},
newK8sVersion: "v1.8.3",
newK8sVersion: "v1.9.3",
},
{ // new version must be higher than v1.8.0
{ // new version must be higher than v1.9.0
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
},
newK8sVersion: "v1.7.10",
expectedMandatoryErrs: 1, // version must be higher than v1.8.0
newK8sVersion: "v1.8.10",
expectedMandatoryErrs: 1, // version must be higher than v1.9.0
},
{ // upgrading two minor versions in one go is not supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.10.0",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.11.0",
},
newK8sVersion: "v1.10.0",
newK8sVersion: "v1.11.0",
expectedMandatoryErrs: 1, // can't upgrade two minor versions
expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
},
{ // downgrading two minor versions in one go is not supported
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.0",
clusterVersion: "v1.11.3",
kubeletVersion: "v1.11.3",
kubeadmVersion: "v1.11.0",
},
newK8sVersion: "v1.8.3",
newK8sVersion: "v1.9.3",
expectedMandatoryErrs: 1, // can't downgrade two minor versions
},
{ // kubeadm version must be higher than the new kube version. However, patch version skews may be forced
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
},
newK8sVersion: "v1.8.5",
newK8sVersion: "v1.9.5",
expectedSkippableErrs: 1,
},
{ // kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
},
newK8sVersion: "v1.9.0",
newK8sVersion: "v1.10.0",
expectedMandatoryErrs: 1,
},
{ // the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.7.8",
kubeadmVersion: "v1.9.0",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.8.8",
kubeadmVersion: "v1.10.0",
},
newK8sVersion: "v1.9.0",
newK8sVersion: "v1.10.0",
expectedSkippableErrs: 1,
},
{ // experimental upgrades supported if the flag is set
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-beta.1",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-beta.1",
},
newK8sVersion: "v1.9.0-beta.1",
newK8sVersion: "v1.10.0-beta.1",
allowExperimental: true,
},
{ // release candidate upgrades supported if the flag is set
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-rc.1",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-rc.1",
},
newK8sVersion: "v1.9.0-rc.1",
newK8sVersion: "v1.10.0-rc.1",
allowRCs: true,
},
{ // release candidate upgrades supported if the flag is set
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-rc.1",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-rc.1",
},
newK8sVersion: "v1.9.0-rc.1",
newK8sVersion: "v1.10.0-rc.1",
allowExperimental: true,
},
{ // the user should not be able to upgrade to an experimental version if they haven't opted into that
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-beta.1",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-beta.1",
},
newK8sVersion: "v1.9.0-beta.1",
newK8sVersion: "v1.10.0-beta.1",
allowRCs: true,
expectedSkippableErrs: 1,
},
{ // the user should not be able to upgrade to an release candidate version if they haven't opted into that
vg: &fakeVersionGetter{
clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.9.0-rc.1",
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-rc.1",
},
newK8sVersion: "v1.9.0-rc.1",
newK8sVersion: "v1.10.0-rc.1",
expectedSkippableErrs: 1,
},
}

View File

@ -21,6 +21,7 @@ import (
"os"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
@ -62,7 +63,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
errs = append(errs, err)
}
// Create/update RBAC rules that makes the 1.8.0+ nodes to rotate certificates and get their CSRs approved automatically
// Create/update RBAC rules that make the nodes rotate certificates and get their CSRs approved automatically
if err := nodebootstraptoken.AutoApproveNodeCertificateRotation(client); err != nil {
errs = append(errs, err)
}
@ -117,14 +118,18 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
return apiclient.TryRunCommand(func() error {
coreDNSDeployment, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
coreDNSDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
if err != nil {
return err
}
if coreDNSDeployment.Status.ReadyReplicas == 0 {
return fmt.Errorf("the CodeDNS deployment isn't ready yet")
return fmt.Errorf("the CoreDNS deployment isn't ready yet")
}
return apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
err = apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}, 10)
}
return nil
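
The change above makes the kube-dns deletion tolerant of an already-missing Deployment. A standalone sketch of that pattern, using a fake clientset; the helper name is an assumption, and the Delete signature matches the client-go version vendored here.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// deleteIfExists deletes a Deployment but treats NotFound as success.
func deleteIfExists(client clientset.Interface, ns, name string) error {
	err := client.AppsV1().Deployments(ns).Delete(name, &metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}

func main() {
	client := fake.NewSimpleClientset() // no kube-dns Deployment exists
	fmt.Println(deleteIfExists(client, metav1.NamespaceSystem, "kube-dns")) // <nil>
}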

View File

@ -136,10 +136,6 @@ func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "test-node",
}
caCert, caKey, err := certsphase.NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
for desc, test := range map[string]struct {
adjustedExpiry time.Duration
@ -160,6 +156,10 @@ func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
expected: true,
},
} {
caCert, caKey, err := certsphase.NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
caCert.NotBefore = caCert.NotBefore.Add(-test.adjustedExpiry).UTC()
apiCert, apiKey, err := certsphase.NewAPIServerCertAndKey(cfg, caCert, caKey)
if err != nil {

View File

@ -20,7 +20,7 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -41,7 +41,7 @@ type Prepuller interface {
DeleteFunc(string) error
}
// DaemonSetPrepuller makes sure the control plane images are availble on all masters
// DaemonSetPrepuller makes sure the control plane images are available on all masters
type DaemonSetPrepuller struct {
client clientset.Interface
cfg *kubeadmapi.MasterConfiguration
@ -99,11 +99,11 @@ func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) err
}
}
// Create a channel for streaming data from goroutines that run in parallell to a blocking for loop that cleans up
// Create a channel for streaming data from goroutines that run in parallel to a blocking for loop that cleans up
prePulledChan := make(chan string, len(componentsToPrepull))
for _, component := range componentsToPrepull {
go func(c string) {
// Wait as long as needed. This WaitFunc call should be blocking until completetion
// Wait as long as needed. This WaitFunc call should be blocking until completion
kubePrepuller.WaitFunc(c)
// When the task is done, go ahead and cleanup by sending the name to the channel
prePulledChan <- c
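
A minimal, dependency-free sketch of the fan-in pattern the comments above describe: each component is waited on in its own goroutine and reports back over a channel, while the main loop enforces an overall timeout. The names and the timeout handling are illustrative assumptions, not kubeadm's exact implementation.

package main

import (
	"fmt"
	"time"
)

// waitForAll blocks until every component's wait function returns, or until
// the overall timeout expires.
func waitForAll(components []string, wait func(string), timeout time.Duration) error {
	done := make(chan string, len(components))
	for _, c := range components {
		go func(c string) {
			wait(c) // blocks until the component's prepull is complete
			done <- c
		}(c)
	}
	deadline := time.After(timeout)
	for i := 0; i < len(components); i++ {
		select {
		case c := <-done:
			fmt.Printf("prepulled %s\n", c)
		case <-deadline:
			return fmt.Errorf("timed out after %v waiting for prepull", timeout)
		}
	}
	return nil
}

func main() {
	comps := []string{"kube-apiserver", "kube-controller-manager", "kube-scheduler"}
	err := waitForAll(comps, func(string) { time.Sleep(10 * time.Millisecond) }, time.Second)
	fmt.Println(err) // <nil>
}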

View File

@ -20,7 +20,7 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -119,7 +119,7 @@ func SelfHostedControlPlane(client clientset.Interface, waiter apiclient.Waiter,
// During this upgrade; the temporary/backup component will take over
if err := apiclient.TryRunCommand(func() error {
if _, err := client.AppsV1beta2().DaemonSets(newDS.ObjectMeta.Namespace).Update(newDS); err != nil {
if _, err := client.AppsV1().DaemonSets(newDS.ObjectMeta.Namespace).Update(newDS); err != nil {
return fmt.Errorf("couldn't update self-hosted component's DaemonSet: %v", err)
}
return nil
@ -256,7 +256,7 @@ func getCurrentControlPlaneComponentResources(client clientset.Interface) (map[s
if err := apiclient.TryRunCommand(func() error {
var tryrunerr error
// Try to get the current self-hosted component
currentDS, tryrunerr = client.AppsV1beta2().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
currentDS, tryrunerr = client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
return tryrunerr // note that tryrunerr is most likely nil here (in successful cases)
}, selfHostingFailureThreshold); err != nil {
return nil, err
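
The update/get calls above run inside a retry wrapper. A simplified sketch of such a retry helper follows, with assumed fixed-delay backoff rather than kubeadm's actual semantics; names are illustrative.

package main

import (
	"fmt"
	"time"
)

// tryRunCommand retries f up to failureThreshold times, sleeping briefly
// between attempts, and returns the last error if all attempts fail.
func tryRunCommand(f func() error, failureThreshold int) error {
	var err error
	for i := 0; i < failureThreshold; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("giving up after %d attempts: %v", failureThreshold, err)
}

func main() {
	attempts := 0
	err := tryRunCommand(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient error")
		}
		return nil
	}, 5)
	fmt.Println(attempts, err) // 3 <nil>
}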

View File

@ -23,7 +23,8 @@ import (
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
@ -133,6 +134,22 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP
if component == constants.Etcd {
recoverEtcd = true
}
// ensure etcd certs are generated for etcd and kube-apiserver
if component == constants.Etcd {
if err := certsphase.CreateEtcdServerCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s certificate: %v", constants.Etcd, err)
}
if err := certsphase.CreateEtcdPeerCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s peer certificate: %v", constants.Etcd, err)
}
}
if component == constants.KubeAPIServer {
if err := certsphase.CreateAPIServerEtcdClientCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s %s-client certificate: %v", constants.KubeAPIServer, constants.Etcd, err)
}
}
// The old manifest is here; in the /etc/kubernetes/manifests/
currentManifestPath := pathMgr.RealManifestPath(component)
// The new, upgraded manifest will be written here
@ -180,7 +197,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
if len(cfg.Etcd.Endpoints) != 0 {
return false, fmt.Errorf("external etcd detected, won't try to change any etcd state")
}
// Checking health state of etcd before proceeding with the upgrtade
// Checking health state of etcd before proceeding with the upgrade
etcdCluster := util.LocalEtcdCluster{}
etcdStatus, err := etcdCluster.GetEtcdClusterStatus()
if err != nil {
@ -191,7 +208,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.DataDir
if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil {
return true, fmt.Errorf("fail to back up etcd data: %v", err)
return true, fmt.Errorf("failed to back up etcd data: %v", err)
}
// Need to check currently used version and version from constants, if differs then upgrade
@ -215,11 +232,11 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeName, constants.Etcd)
if err != nil {
return true, fmt.Errorf("fail to get etcd pod's hash: %v", err)
return true, fmt.Errorf("failed to get etcd pod's hash: %v", err)
}
// Write the updated etcd static Pod manifest into the temporary directory, at this point no etcd change
// has occured in any aspects.
// has occurred in any aspects.
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.TempManifestDir(), cfg); err != nil {
return true, fmt.Errorf("error creating local etcd static pod manifest file: %v", err)
}
@ -227,7 +244,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
// Perform etcd upgrade using common to all control plane components function
if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests); err != nil {
// Since etcd upgrade component failed, the old manifest has been restored
// now we need to check the heatlth of etcd cluster if it came back up with old manifest
// now we need to check the health of etcd cluster if it came back up with old manifest
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
// At this point we know that etcd cluster is dead and it is safe to copy backup datastore and to rollback old etcd manifest
if err := rollbackEtcdData(cfg, fmt.Errorf("etcd cluster is not healthy after upgrade: %v rolling back", err), pathMgr); err != nil {
@ -253,7 +270,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
// Checking health state of etcd after the upgrade
if _, err = etcdCluster.GetEtcdClusterStatus(); err != nil {
// Despite the fact that upgradeComponent was sucessfull, there is something wrong with etcd cluster
// Despite the fact that upgradeComponent was successful, there is something wrong with etcd cluster
// First step is to restore back up of datastore
if err := rollbackEtcdData(cfg, fmt.Errorf("etcd cluster is not healthy after upgrade: %v rolling back", err), pathMgr); err != nil {
// Even copying back datastore failed, no options for recovery left, bailing out
@ -299,7 +316,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
// Write the updated static Pod manifests into the temporary directory
fmt.Printf("[upgrade/staticpods] Writing new Static Pod manifests to %q\n", pathMgr.TempManifestDir())
err = controlplane.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg)
err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg)
if err != nil {
return fmt.Errorf("error creating init static pod manifest files: %v", err)
}
@ -324,7 +341,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr StaticPodPathManager, restoreEtcd bool) error {
errs := []error{origErr}
for component, backupPath := range oldManifests {
// Will restore etcd manifest only if it was explicitely requested by setting restoreEtcd to True
// Will restore etcd manifest only if it was explicitly requested by setting restoreEtcd to True
if component == constants.Etcd && !restoreEtcd {
continue
}
@ -337,7 +354,7 @@ func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr
errs = append(errs, err)
}
}
// Let the user know there we're problems, but we tried to reçover
// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs)
}
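
A simplified sketch of the backup-then-replace flow used for static Pod manifests during this upgrade: back up the running manifest, move the new one into place, and restore the backup if that fails. Paths and the helper name are illustrative assumptions.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// replaceManifest backs up the current manifest and moves the new one into
// place; if the move fails, the backup is restored so the kubelet keeps
// running the old component.
func replaceManifest(realPath, backupPath, newPath string) error {
	if err := os.Rename(realPath, backupPath); err != nil {
		return fmt.Errorf("couldn't back up manifest: %v", err)
	}
	if err := os.Rename(newPath, realPath); err != nil {
		_ = os.Rename(backupPath, realPath) // roll back to the old manifest
		return fmt.Errorf("couldn't move new manifest into place: %v", err)
	}
	return nil
}

func main() {
	dir, _ := ioutil.TempDir("", "manifests")
	defer os.RemoveAll(dir)
	real := filepath.Join(dir, "etcd.yaml")
	ioutil.WriteFile(real, []byte("old"), 0600)
	tmp := filepath.Join(dir, "etcd-new.yaml")
	ioutil.WriteFile(tmp, []byte("new"), 0600)
	fmt.Println(replaceManifest(real, filepath.Join(dir, "etcd.bak.yaml"), tmp)) // <nil>
}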

View File

@ -29,7 +29,8 @@ import (
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
@ -49,7 +50,7 @@ apiServerExtraArgs: null
authorizationModes:
- Node
- RBAC
certificatesDir: /etc/kubernetes/pki
certificatesDir: %s
cloudProvider: ""
controllerManagerExtraArgs: null
etcd:
@ -60,8 +61,10 @@ etcd:
extraArgs: null
image: ""
keyFile: ""
serverCertSANs: null
peerCertSANs: null
featureFlags: null
imageRepository: gcr.io/google_containers
imageRepository: k8s.gcr.io
kubernetesVersion: %s
networking:
dnsDomain: cluster.local
@ -305,12 +308,39 @@ func TestStaticPodControlPlane(t *testing.T) {
defer os.RemoveAll(pathMgr.TempManifestDir())
defer os.RemoveAll(pathMgr.BackupManifestDir())
oldcfg, err := getConfig("v1.7.0")
tempCertsDir, err := ioutil.TempDir("", "kubeadm-certs")
if err != nil {
t.Fatalf("couldn't create temporary certificates directory: %v", err)
}
defer os.RemoveAll(tempCertsDir)
oldcfg, err := getConfig("v1.7.0", tempCertsDir)
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}
// Initialize PKI minus any etcd certificates to simulate etcd PKI upgrade
certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
certsphase.CreateCACertAndKeyfiles,
certsphase.CreateAPIServerCertAndKeyFiles,
certsphase.CreateAPIServerKubeletClientCertAndKeyFiles,
// certsphase.CreateEtcdServerCertAndKeyFiles,
// certsphase.CreateEtcdPeerCertAndKeyFiles,
// certsphase.CreateAPIServerEtcdClientCertAndKeyFiles,
certsphase.CreateServiceAccountKeyAndPublicKeyFiles,
certsphase.CreateFrontProxyCACertAndKeyFiles,
certsphase.CreateFrontProxyClientCertAndKeyFiles,
}
for _, action := range certActions {
err := action(oldcfg)
if err != nil {
t.Fatalf("couldn't initialize pre-upgrade certificate: %v", err)
}
}
fmt.Printf("Wrote certs to %s\n", oldcfg.CertificatesDir)
// Initialize the directory with v1.7 manifests; should then be upgraded to v1.8 using the method
err = controlplane.CreateInitStaticPodManifestFiles(pathMgr.RealManifestDir(), oldcfg)
err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.RealManifestDir(), oldcfg)
if err != nil {
t.Fatalf("couldn't run CreateInitStaticPodManifestFiles: %v", err)
}
@ -324,7 +354,7 @@ func TestStaticPodControlPlane(t *testing.T) {
t.Fatalf("couldn't read temp file: %v", err)
}
newcfg, err := getConfig("v1.8.0")
newcfg, err := getConfig("v1.8.0", tempCertsDir)
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}
@ -332,9 +362,10 @@ func TestStaticPodControlPlane(t *testing.T) {
actualErr := StaticPodControlPlane(waiter, pathMgr, newcfg, false)
if (actualErr != nil) != rt.expectedErr {
t.Errorf(
"failed UpgradeStaticPodControlPlane\n\texpected error: %t\n\tgot: %t",
"failed UpgradeStaticPodControlPlane\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
rt.expectedErr,
(actualErr != nil),
actualErr,
)
}
@ -365,10 +396,10 @@ func getAPIServerHash(dir string) (string, error) {
return fmt.Sprintf("%x", sha256.Sum256(fileBytes)), nil
}
func getConfig(version string) (*kubeadmapi.MasterConfiguration, error) {
func getConfig(version string, certsDir string) (*kubeadmapi.MasterConfiguration, error) {
externalcfg := &kubeadmapiext.MasterConfiguration{}
internalcfg := &kubeadmapi.MasterConfiguration{}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(fmt.Sprintf(testConfiguration, version)), externalcfg); err != nil {
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(fmt.Sprintf(testConfiguration, certsDir, version)), externalcfg); err != nil {
return nil, fmt.Errorf("unable to decode config: %v", err)
}
legacyscheme.Scheme.Convert(externalcfg, internalcfg, nil)

View File

@ -39,8 +39,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["uploadconfig_test.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",