vendor update for CSI 0.3.0
13  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns/BUILD  (generated, vendored)

@@ -8,17 +8,15 @@ load(

go_test(
    name = "go_default_test",
    srcs = [
        "dns_test.go",
        "versions_test.go",
    ],
    srcs = ["dns_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
@@ -30,7 +28,6 @@ go_library(
    srcs = [
        "dns.go",
        "manifests.go",
        "versions.go",
    ],
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns",
    deps = [
@@ -39,8 +36,7 @@ go_library(
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/github.com/mholt/caddy/caddyfile:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1:go_default_library",
@@ -48,6 +44,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
    ],
)
232  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns/dns.go  (generated, vendored)

@@ -17,8 +17,12 @@ limitations under the License.
package dns

import (
    "encoding/json"
    "fmt"
    "runtime"
    "strings"

    "github.com/mholt/caddy/caddyfile"

    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
@@ -27,33 +31,53 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kuberuntime "k8s.io/apimachinery/pkg/runtime"
    clientset "k8s.io/client-go/kubernetes"
    clientsetscheme "k8s.io/client-go/kubernetes/scheme"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/features"
    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/util/version"
)

const (
    // KubeDNSServiceAccountName describes the name of the ServiceAccount for the kube-dns addon
    KubeDNSServiceAccountName = "kube-dns"
    KubeDNSServiceAccountName  = "kube-dns"
    kubeDNSStubDomain          = "stubDomains"
    kubeDNSUpstreamNameservers = "upstreamNameservers"
    kubeDNSFederation          = "federations"
)

// DeployedDNSAddon returns the type of DNS addon currently deployed
func DeployedDNSAddon(client clientset.Interface) (string, string, error) {
    deploymentsClient := client.AppsV1().Deployments(metav1.NamespaceSystem)
    deployments, err := deploymentsClient.List(metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"})
    if err != nil {
        return "", "", fmt.Errorf("couldn't retrieve DNS addon deployments: %v", err)
    }

    switch len(deployments.Items) {
    case 0:
        return "", "", nil
    case 1:
        addonName := deployments.Items[0].Name
        addonImage := deployments.Items[0].Spec.Template.Spec.Containers[0].Image
        addonImageParts := strings.Split(addonImage, ":")
        addonVersion := addonImageParts[len(addonImageParts)-1]
        return addonName, addonVersion, nil
    default:
        return "", "", fmt.Errorf("multiple DNS addon deployments found: %v", deployments.Items)
    }
}

// EnsureDNSAddon creates the kube-dns or CoreDNS addon
func EnsureDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
    k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
    if err != nil {
        return fmt.Errorf("couldn't parse kubernetes version %q: %v", cfg.KubernetesVersion, err)
    }
    if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
        return coreDNSAddon(cfg, client, k8sVersion)
        return coreDNSAddon(cfg, client)
    }
    return kubeDNSAddon(cfg, client, k8sVersion)
    return kubeDNSAddon(cfg, client)
}

func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, k8sVersion *version.Version) error {
func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
    if err := CreateServiceAccount(client); err != nil {
        return err
    }
@@ -72,18 +96,15 @@ func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interfac
        dnsProbeAddr = dnsBindAddr
    }

    // Get the YAML manifest conditionally based on the k8s version
    kubeDNSDeploymentBytes := GetKubeDNSManifest(k8sVersion)
    dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(kubeDNSDeploymentBytes,
    dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment,
        struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
            ImageRepository: cfg.ImageRepository,
            Arch:            runtime.GOARCH,
            // Get the kube-dns version conditionally based on the k8s version
            Version:        GetDNSVersion(k8sVersion, kubeadmconstants.KubeDNS),
            DNSBindAddr:    dnsBindAddr,
            DNSProbeAddr:   dnsProbeAddr,
            DNSDomain:      cfg.Networking.DNSDomain,
            MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
            Version:        kubeadmconstants.KubeDNSVersion,
            DNSBindAddr:    dnsBindAddr,
            DNSProbeAddr:   dnsProbeAddr,
            DNSDomain:      cfg.Networking.DNSDomain,
            MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
        })
    if err != nil {
        return fmt.Errorf("error when parsing kube-dns deployment template: %v", err)
@@ -116,7 +137,7 @@ func CreateServiceAccount(client clientset.Interface) error {

func createKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.Interface) error {
    kubednsDeployment := &apps.Deployment{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
        return fmt.Errorf("unable to decode kube-dns deployment %v", err)
    }

@@ -129,21 +150,44 @@ func createKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.I
    return createDNSService(kubednsService, serviceBytes, client)
}

func coreDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, k8sVersion *version.Version) error {
    // Get the YAML manifest conditionally based on the k8s version
    dnsDeploymentBytes := GetCoreDNSManifest(k8sVersion)
    coreDNSDeploymentBytes, err := kubeadmutil.ParseTemplate(dnsDeploymentBytes, struct{ MasterTaintKey, Version string }{
        MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
        Version:        GetDNSVersion(k8sVersion, kubeadmconstants.CoreDNS),
func coreDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
    // Get the YAML manifest
    coreDNSDeploymentBytes, err := kubeadmutil.ParseTemplate(CoreDNSDeployment, struct{ ImageRepository, MasterTaintKey, Version string }{
        ImageRepository: cfg.ImageRepository,
        MasterTaintKey:  kubeadmconstants.LabelNodeRoleMaster,
        Version:         kubeadmconstants.CoreDNSVersion,
    })
    if err != nil {
        return fmt.Errorf("error when parsing CoreDNS deployment template: %v", err)
    }

    // Get the kube-dns ConfigMap for translation to equivalent CoreDNS Config.
    kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeDNS, metav1.GetOptions{})
    if err != nil && !apierrors.IsNotFound(err) {
        return err
    }

    stubDomain, err := translateStubDomainOfKubeDNSToProxyCoreDNS(kubeDNSStubDomain, kubeDNSConfigMap)
    if err != nil {
        return err
    }

    upstreamNameserver, err := translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS(kubeDNSUpstreamNameservers, kubeDNSConfigMap)
    if err != nil {
        return err
    }
    coreDNSDomain := cfg.Networking.DNSDomain
    federations, err := translateFederationsofKubeDNSToCoreDNS(kubeDNSFederation, coreDNSDomain, kubeDNSConfigMap)
    if err != nil {
        return err
    }

    // Get the config file for CoreDNS
    coreDNSConfigMapBytes, err := kubeadmutil.ParseTemplate(CoreDNSConfigMap, struct{ DNSDomain, ServiceCIDR string }{
        ServiceCIDR: cfg.Networking.ServiceSubnet,
        DNSDomain:   cfg.Networking.DNSDomain,
    coreDNSConfigMapBytes, err := kubeadmutil.ParseTemplate(CoreDNSConfigMap, struct{ DNSDomain, UpstreamNameserver, Federation, StubDomain string }{
        DNSDomain:          coreDNSDomain,
        UpstreamNameserver: upstreamNameserver,
        Federation:         federations,
        StubDomain:         stubDomain,
    })
    if err != nil {
        return fmt.Errorf("error when parsing CoreDNS configMap template: %v", err)
@@ -171,17 +215,17 @@ func coreDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interfac

func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, client clientset.Interface) error {
    coreDNSConfigMap := &v1.ConfigMap{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), configBytes, coreDNSConfigMap); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), configBytes, coreDNSConfigMap); err != nil {
        return fmt.Errorf("unable to decode CoreDNS configmap %v", err)
    }

    // Create the ConfigMap for CoreDNS or update it in case it already exists
    if err := apiclient.CreateOrUpdateConfigMap(client, coreDNSConfigMap); err != nil {
    // Create the ConfigMap for CoreDNS or retain it in case it already exists
    if err := apiclient.CreateOrRetainConfigMap(client, coreDNSConfigMap, kubeadmconstants.CoreDNS); err != nil {
        return err
    }

    coreDNSClusterRoles := &rbac.ClusterRole{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSClusterRole), coreDNSClusterRoles); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(CoreDNSClusterRole), coreDNSClusterRoles); err != nil {
        return fmt.Errorf("unable to decode CoreDNS clusterroles %v", err)
    }

@@ -191,7 +235,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien
    }

    coreDNSClusterRolesBinding := &rbac.ClusterRoleBinding{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSClusterRoleBinding), coreDNSClusterRolesBinding); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(CoreDNSClusterRoleBinding), coreDNSClusterRolesBinding); err != nil {
        return fmt.Errorf("unable to decode CoreDNS clusterrolebindings %v", err)
    }

@@ -201,7 +245,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien
    }

    coreDNSServiceAccount := &v1.ServiceAccount{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSServiceAccount), coreDNSServiceAccount); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(CoreDNSServiceAccount), coreDNSServiceAccount); err != nil {
        return fmt.Errorf("unable to decode CoreDNS serviceaccount %v", err)
    }

@@ -211,7 +255,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien
    }

    coreDNSDeployment := &apps.Deployment{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), deploymentBytes, coreDNSDeployment); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), deploymentBytes, coreDNSDeployment); err != nil {
        return fmt.Errorf("unable to decode CoreDNS deployment %v", err)
    }

@@ -225,7 +269,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien
}

func createDNSService(dnsService *v1.Service, serviceBytes []byte, client clientset.Interface) error {
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), serviceBytes, dnsService); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), serviceBytes, dnsService); err != nil {
        return fmt.Errorf("unable to decode the DNS service %v", err)
    }

@@ -244,3 +288,117 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client
    }
    return nil
}

// translateStubDomainOfKubeDNSToProxyCoreDNS translates StubDomain Data in kube-dns ConfigMap
// in the form of Proxy for the CoreDNS Corefile.
func translateStubDomainOfKubeDNSToProxyCoreDNS(dataField string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
    if kubeDNSConfigMap == nil {
        return "", nil
    }

    if proxy, ok := kubeDNSConfigMap.Data[dataField]; ok {
        stubDomainData := make(map[string][]string)
        err := json.Unmarshal([]byte(proxy), &stubDomainData)
        if err != nil {
            return "", fmt.Errorf("failed to parse JSON from 'kube-dns ConfigMap: %v", err)
        }

        var proxyStanza []interface{}
        for domain, proxyIP := range stubDomainData {
            pStanza := map[string]interface{}{}
            pStanza["keys"] = []string{domain + ":53"}
            pStanza["body"] = [][]string{
                {"errors"},
                {"cache", "30"},
                append([]string{"proxy", "."}, proxyIP...),
            }
            proxyStanza = append(proxyStanza, pStanza)
        }
        stanzasBytes, err := json.Marshal(proxyStanza)
        if err != nil {
            return "", err
        }

        corefileStanza, err := caddyfile.FromJSON(stanzasBytes)
        if err != nil {
            return "", err
        }

        return prepCorefileFormat(string(corefileStanza), 4), nil
    }
    return "", nil
}

// translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS translates UpstreamNameServer Data in kube-dns ConfigMap
// in the form of Proxy for the CoreDNS Corefile.
func translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS(dataField string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
    if kubeDNSConfigMap == nil {
        return "", nil
    }

    if upstreamValues, ok := kubeDNSConfigMap.Data[dataField]; ok {
        var upstreamProxyIP []string

        err := json.Unmarshal([]byte(upstreamValues), &upstreamProxyIP)
        if err != nil {
            return "", fmt.Errorf("failed to parse JSON from 'kube-dns ConfigMap: %v", err)
        }

        coreDNSProxyStanzaList := strings.Join(upstreamProxyIP, " ")
        return coreDNSProxyStanzaList, nil
    }
    return "/etc/resolv.conf", nil
}

// translateFederationsofKubeDNSToCoreDNS translates Federations Data in kube-dns ConfigMap
// to Federation for CoreDNS Corefile.
func translateFederationsofKubeDNSToCoreDNS(dataField, coreDNSDomain string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
    if kubeDNSConfigMap == nil {
        return "", nil
    }

    if federation, ok := kubeDNSConfigMap.Data[dataField]; ok {
        var (
            federationStanza []interface{}
            body             [][]string
        )
        federationData := make(map[string]string)

        err := json.Unmarshal([]byte(federation), &federationData)
        if err != nil {
            return "", fmt.Errorf("failed to parse JSON from kube-dns ConfigMap: %v", err)
        }
        fStanza := map[string]interface{}{}

        for name, domain := range federationData {
            body = append(body, []string{name, domain})
        }
        federationStanza = append(federationStanza, fStanza)
        fStanza["keys"] = []string{"federation " + coreDNSDomain}
        fStanza["body"] = body
        stanzasBytes, err := json.Marshal(federationStanza)
        if err != nil {
            return "", err
        }

        corefileStanza, err := caddyfile.FromJSON(stanzasBytes)
        if err != nil {
            return "", err
        }

        return prepCorefileFormat(string(corefileStanza), 8), nil
    }
    return "", nil
}

// prepCorefileFormat indents the output of the Corefile caddytext and replaces tabs with spaces
// to neatly format the configmap, making it readable.
func prepCorefileFormat(s string, indentation int) string {
    r := []string{}
    for _, line := range strings.Split(s, "\n") {
        indented := strings.Repeat(" ", indentation) + line
        r = append(r, indented)
    }
    corefile := strings.Join(r, "\n")
    return "\n" + strings.Replace(corefile, "\t", " ", -1)
}
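For reference, a minimal standalone sketch of the stubDomains translation implemented above, assuming the vendored github.com/mholt/caddy/caddyfile package and hypothetical input data: the kube-dns JSON is reshaped into caddyfile server blocks and rendered as Corefile text.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/mholt/caddy/caddyfile"
)

func main() {
    // kube-dns stores stub domains as JSON: domain -> list of upstream servers.
    stub := map[string][]string{"foo.com": {"1.2.3.4:5300"}}

    // Build the intermediate JSON shape used by the translation:
    // one server block per domain, keyed on "<domain>:53".
    var blocks []interface{}
    for domain, servers := range stub {
        blocks = append(blocks, map[string]interface{}{
            "keys": []string{domain + ":53"},
            "body": [][]string{
                {"errors"},
                {"cache", "30"},
                append([]string{"proxy", "."}, servers...),
            },
        })
    }

    j, err := json.Marshal(blocks)
    if err != nil {
        panic(err)
    }
    corefile, err := caddyfile.FromJSON(j) // renders the blocks as Corefile text
    if err != nil {
        panic(err)
    }
    fmt.Println(string(corefile))
    // Output (roughly):
    // foo.com:53 {
    //     errors
    //     cache 30
    //     proxy . 1.2.3.4:5300
    // }
}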
255  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns/dns_test.go  (generated, vendored)

@@ -17,9 +17,12 @@ limitations under the License.
package dns

import (
    "strings"
    "testing"

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    clientsetfake "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
@@ -91,7 +94,7 @@ func TestCompileManifests(t *testing.T) {
        expected bool
    }{
        {
            manifest: v180AndAboveKubeDNSDeployment,
            manifest: KubeDNSDeployment,
            data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
                ImageRepository: "foo",
                Arch:            "foo",
@@ -112,9 +115,10 @@ func TestCompileManifests(t *testing.T) {
        },
        {
            manifest: CoreDNSDeployment,
            data: struct{ MasterTaintKey, Version string }{
                MasterTaintKey: "foo",
                Version:        "foo",
            data: struct{ ImageRepository, MasterTaintKey, Version string }{
                ImageRepository: "foo",
                MasterTaintKey:  "foo",
                Version:         "foo",
            },
            expected: true,
        },
@@ -127,9 +131,11 @@ func TestCompileManifests(t *testing.T) {
        },
        {
            manifest: CoreDNSConfigMap,
            data: struct{ DNSDomain, ServiceCIDR string }{
                DNSDomain:   "foo",
                ServiceCIDR: "foo",
            data: struct{ DNSDomain, Federation, UpstreamNameserver, StubDomain string }{
                DNSDomain:          "foo",
                Federation:         "foo",
                UpstreamNameserver: "foo",
                StubDomain:         "foo",
            },
            expected: true,
        },
@@ -175,3 +181,238 @@ func TestGetDNSIP(t *testing.T) {
        }
    }
}

func TestTranslateStubDomainKubeDNSToCoreDNS(t *testing.T) {
    testCases := []struct {
        configMap *v1.ConfigMap
        expectOne string
        expectTwo string
    }{
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kube-dns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "stubDomains":         `{"foo.com" : ["1.2.3.4:5300","3.3.3.3"], "my.cluster.local" : ["2.3.4.5"]}`,
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
                },
            },

            expectOne: `
    foo.com:53 {
        errors
        cache 30
        proxy . 1.2.3.4:5300 3.3.3.3
    }

    my.cluster.local:53 {
        errors
        cache 30
        proxy . 2.3.4.5
    }`,
            expectTwo: `
    my.cluster.local:53 {
        errors
        cache 30
        proxy . 2.3.4.5
    }

    foo.com:53 {
        errors
        cache 30
        proxy . 1.2.3.4:5300 3.3.3.3
    }`,
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kubedns",
                    Namespace: "kube-system",
                },
            },

            expectOne: "",
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kube-dns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "stubDomains":         `{"foo.com" : ["1.2.3.4:5300"], "my.cluster.local" : ["2.3.4.5"]}`,
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
                },
            },

            expectOne: `
    foo.com:53 {
        errors
        cache 30
        proxy . 1.2.3.4:5300
    }

    my.cluster.local:53 {
        errors
        cache 30
        proxy . 2.3.4.5
    }`,
            expectTwo: `
    my.cluster.local:53 {
        errors
        cache 30
        proxy . 2.3.4.5
    }

    foo.com:53 {
        errors
        cache 30
        proxy . 1.2.3.4:5300
    }`,
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kube-dns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
                },
            },

            expectOne: "",
        },
    }
    for _, testCase := range testCases {
        out, err := translateStubDomainOfKubeDNSToProxyCoreDNS(kubeDNSStubDomain, testCase.configMap)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        if !strings.Contains(out, testCase.expectOne) && !strings.Contains(out, testCase.expectTwo) {
            t.Errorf("expected to find %q or %q in output: %q", testCase.expectOne, testCase.expectTwo, out)
        }
    }
}

func TestTranslateUpstreamKubeDNSToCoreDNS(t *testing.T) {
    testCases := []struct {
        configMap *v1.ConfigMap
        expect    string
    }{
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kube-dns",
                    Namespace: "kube-system",
                },
            },

            expect: "/etc/resolv.conf",
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kubedns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "stubDomains":         ` {"foo.com" : ["1.2.3.4:5300"], "my.cluster.local" : ["2.3.4.5"]}`,
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4", "4.4.4.4"]`,
                },
            },

            expect: "8.8.8.8 8.8.4.4 4.4.4.4",
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kubedns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
                },
            },

            expect: "8.8.8.8 8.8.4.4",
        },
    }
    for _, testCase := range testCases {
        out, err := translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS(kubeDNSUpstreamNameservers, testCase.configMap)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        if !strings.Contains(out, testCase.expect) {
            t.Errorf("expected to find %q in output: %q", testCase.expect, out)
        }
    }
}

func TestTranslateFederationKubeDNSToCoreDNS(t *testing.T) {
    testCases := []struct {
        configMap *v1.ConfigMap
        expectOne string
        expectTwo string
    }{
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kube-dns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "federations":         `{"foo" : "foo.feddomain.com", "bar" : "bar.feddomain.com"}`,
                    "stubDomains":         `{"foo.com" : ["1.2.3.4:5300","3.3.3.3"], "my.cluster.local" : ["2.3.4.5"]}`,
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
                },
            },

            expectOne: `
        federation cluster.local {
            foo foo.feddomain.com
            bar bar.feddomain.com
        }`,
            expectTwo: `
        federation cluster.local {
            bar bar.feddomain.com
            foo foo.feddomain.com
        }`,
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kubedns",
                    Namespace: "kube-system",
                },
            },

            expectOne: "",
        },
        {
            configMap: &v1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "kube-dns",
                    Namespace: "kube-system",
                },
                Data: map[string]string{
                    "stubDomains":         `{"foo.com" : ["1.2.3.4:5300"], "my.cluster.local" : ["2.3.4.5"]}`,
                    "upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
                },
            },

            expectOne: "",
        },
    }
    for _, testCase := range testCases {
        out, err := translateFederationsofKubeDNSToCoreDNS(kubeDNSFederation, "cluster.local", testCase.configMap)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        if !strings.Contains(out, testCase.expectOne) && !strings.Contains(out, testCase.expectTwo) {
            t.Errorf("expected to find %q or %q in output: %q", testCase.expectOne, testCase.expectTwo, out)
        }
    }
}
56  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns/manifests.go  (generated, vendored)

@@ -17,8 +17,8 @@ limitations under the License.
package dns

const (
    // v180AndAboveKubeDNSDeployment is the kube-dns Deployment manifest for the kube-dns manifest for v1.7+
    v180AndAboveKubeDNSDeployment = `
    // KubeDNSDeployment is the kube-dns Deployment manifest for the kube-dns manifest for v1.7+
    KubeDNSDeployment = `
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -173,16 +173,8 @@ spec:
        operator: Exists
      - key: {{ .MasterTaintKey }}
        effect: NoSchedule
      # TODO: Remove this affinity field as soon as we are using manifest lists
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: beta.kubernetes.io/arch
                operator: In
                values:
                - {{ .Arch }}
      nodeSelector:
        beta.kubernetes.io/arch: {{ .Arch }}
`

    // KubeDNSService is the kube-dns Service manifest
@@ -196,6 +188,9 @@ metadata:
    kubernetes.io/name: "KubeDNS"
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  # Without this resourceVersion value, an update of the Service between versions will yield:
  #   Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
  resourceVersion: "0"
@@ -243,21 +238,9 @@ spec:
        operator: Exists
      - key: {{ .MasterTaintKey }}
        effect: NoSchedule
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values:
                  - coredns
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:{{ .Version }}
        image: {{ .ImageRepository }}/coredns:{{ .Version }}
        imagePullPolicy: IfNotPresent
        resources:
          limits:
@@ -269,6 +252,7 @@ spec:
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
@@ -276,6 +260,9 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
@@ -285,6 +272,14 @@ spec:
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
@@ -307,15 +302,16 @@ data:
    .:53 {
        errors
        health
        kubernetes {{ .DNSDomain }} {{ .ServiceCIDR }} {
        kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa {
           pods insecure
           upstream /etc/resolv.conf
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        }{{ .Federation }}
        prometheus :9153
        proxy . /etc/resolv.conf
        proxy . {{ .UpstreamNameserver }}
        cache 30
    }
        reload
    }{{ .StubDomain }}
`
    // CoreDNSClusterRole is the CoreDNS ClusterRole manifest
    CoreDNSClusterRole = `
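As a rough illustration, kubeadmutil.ParseTemplate amounts to a text/template render over these manifest strings. A minimal sketch, with the template abbreviated from the CoreDNSConfigMap data section above and hypothetical field values:

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

func main() {
    // Abbreviated stand-in for the CoreDNSConfigMap data section above.
    const corefileTmpl = `.:53 {
    errors
    health
    kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa {
       pods insecure
       upstream
       fallthrough in-addr.arpa ip6.arpa
    }{{ .Federation }}
    prometheus :9153
    proxy . {{ .UpstreamNameserver }}
    cache 30
    reload
}{{ .StubDomain }}`

    t := template.Must(template.New("corefile").Parse(corefileTmpl))

    var buf bytes.Buffer
    // UpstreamNameserver falls back to /etc/resolv.conf when the kube-dns
    // ConfigMap defines no upstreamNameservers; Federation and StubDomain
    // stay empty unless the translation functions produced stanzas.
    err := t.Execute(&buf, struct {
        DNSDomain, UpstreamNameserver, Federation, StubDomain string
    }{"cluster.local", "/etc/resolv.conf", "", ""})
    if err != nil {
        panic(err)
    }
    fmt.Println(buf.String())
}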
57  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns/versions.go  (generated, vendored, deleted)

@@ -1,57 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dns

import (
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/pkg/util/version"
)

const (
    kubeDNSv190AndAboveVersion = "1.14.8"
    coreDNSVersion             = "1.0.4"
)

// GetDNSVersion returns the right kube-dns version for a specific k8s version
func GetDNSVersion(kubeVersion *version.Version, dns string) string {
    // v1.9.0+ uses kube-dns 1.14.8
    // v1.9.0+ uses CoreDNS 1.0.4 if feature gate "CoreDNS" is enabled.

    // In the future when the version is bumped at HEAD; add conditional logic to return the right versions
    // Also, the version might be bumped for different k8s releases on the same branch
    switch dns {
    case kubeadmconstants.CoreDNS:
        // return the CoreDNS version
        return coreDNSVersion
    default:
        return kubeDNSv190AndAboveVersion
    }
}

// GetKubeDNSManifest returns the right kube-dns YAML manifest for a specific k8s version
func GetKubeDNSManifest(kubeVersion *version.Version) string {
    // v1.8.0+ has only one known YAML manifest spec, just return that here
    // In the future when the kube-dns version is bumped at HEAD; add conditional logic to return the right manifest
    return v180AndAboveKubeDNSDeployment
}

// GetCoreDNSManifest returns the right CoreDNS YAML manifest for a specific k8s version
func GetCoreDNSManifest(kubeVersion *version.Version) string {
    // v1.9.0+ has only one known YAML manifest spec, just return that here
    // In the future when the CoreDNS version is bumped at HEAD; add conditional logic to return the right manifest
    return CoreDNSDeployment
}
68  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns/versions_test.go  (generated, vendored, deleted)

@@ -1,68 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dns

import (
    "testing"

    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/pkg/util/version"
)

func TestGetKubeDNSVersion(t *testing.T) {
    var tests = []struct {
        k8sVersion string
        dns        string
        expected   string
    }{
        {
            k8sVersion: "v1.9.0",
            dns:        kubeadmconstants.KubeDNS,
            expected:   kubeDNSv190AndAboveVersion,
        },
        {
            k8sVersion: "v1.10.0",
            dns:        kubeadmconstants.KubeDNS,
            expected:   kubeDNSv190AndAboveVersion,
        },
        {
            k8sVersion: "v1.9.0",
            dns:        kubeadmconstants.CoreDNS,
            expected:   coreDNSVersion,
        },
        {
            k8sVersion: "v1.10.0",
            dns:        kubeadmconstants.CoreDNS,
            expected:   coreDNSVersion,
        },
    }
    for _, rt := range tests {
        k8sVersion, err := version.ParseSemantic(rt.k8sVersion)
        if err != nil {
            t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err)
        }

        actualDNSVersion := GetDNSVersion(k8sVersion, rt.dns)
        if actualDNSVersion != rt.expected {
            t.Errorf(
                "failed GetDNSVersion:\n\texpected: %s\n\t  actual: %s",
                rt.expected,
                actualDNSVersion,
            )
        }
    }
}
6  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy/BUILD  (generated, vendored)

@@ -11,7 +11,7 @@ go_test(
    srcs = ["proxy_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/config:go_default_library",
        "//pkg/apis/core:go_default_library",
@@ -34,19 +34,17 @@ go_library(
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy",
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library",
        "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
    ],
)
15  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy/manifests.go  (generated, vendored)

@@ -69,7 +69,10 @@ spec:
    metadata:
      labels:
        k8s-app: kube-proxy
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      priorityClassName: system-node-critical
      containers:
      - name: kube-proxy
        image: {{ if .ImageOverride }}{{ .ImageOverride }}{{ else }}{{ .ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}{{ end }}
@@ -90,12 +93,6 @@ spec:
          readOnly: true
      hostNetwork: true
      serviceAccountName: kube-proxy
      tolerations:
      - key: {{ .MasterTaintKey }}
        effect: NoSchedule
      - key: {{ .CloudTaintKey }}
        value: "true"
        effect: NoSchedule
      volumes:
      - name: kube-proxy
        configMap:
@@ -107,5 +104,11 @@ spec:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - operator: Exists
      nodeSelector:
        beta.kubernetes.io/arch: {{ .Arch }}
`
)
14  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy/proxy.go  (generated, vendored)

@@ -27,14 +27,12 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kuberuntime "k8s.io/apimachinery/pkg/runtime"
    clientset "k8s.io/client-go/kubernetes"
    clientsetscheme "k8s.io/client-go/kubernetes/scheme"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    kubeproxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme"
    kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
)

const (
@@ -53,7 +51,7 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte
    }

    // Generate Master Enpoint kubeconfig file
    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(&cfg.API)
    if err != nil {
        return err
    }
@@ -77,13 +75,11 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte
    if err != nil {
        return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
    }
    proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, MasterTaintKey, CloudTaintKey string }{
    proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride string }{
        ImageRepository: cfg.GetControlPlaneImageRepository(),
        Arch:            runtime.GOARCH,
        Version:         kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion),
        ImageOverride:   cfg.UnifiedControlPlaneImage,
        MasterTaintKey:  kubeadmconstants.LabelNodeRoleMaster,
        CloudTaintKey:   algorithm.TaintExternalCloudProvider,
    })
    if err != nil {
        return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
@@ -117,7 +113,7 @@ func CreateRBACRules(client clientset.Interface) error {

func createKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client clientset.Interface) error {
    kubeproxyConfigMap := &v1.ConfigMap{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil {
        return fmt.Errorf("unable to decode kube-proxy configmap %v", err)
    }

@@ -127,7 +123,7 @@ func createKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client clientse
    }

    kubeproxyDaemonSet := &apps.DaemonSet{}
    if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil {
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil {
        return fmt.Errorf("unable to decode kube-proxy daemonset %v", err)
    }
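A recurring change across these addon files is swapping the kube-internal legacyscheme for the published client-go scheme when decoding manifests. A minimal sketch of that decode pattern, with a hypothetical manifest:

package main

import (
    "fmt"

    apps "k8s.io/api/apps/v1"
    kuberuntime "k8s.io/apimachinery/pkg/runtime"
    clientsetscheme "k8s.io/client-go/kubernetes/scheme"
)

// decodeDaemonSet decodes a YAML/JSON manifest into a typed object using the
// client-go codec factory, which registers only the published API groups.
func decodeDaemonSet(manifest []byte) (*apps.DaemonSet, error) {
    ds := &apps.DaemonSet{}
    if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), manifest, ds); err != nil {
        return nil, fmt.Errorf("unable to decode daemonset: %v", err)
    }
    return ds, nil
}

func main() {
    manifest := []byte(`apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-proxy
  namespace: kube-system`)

    ds, err := decodeDaemonSet(manifest)
    if err != nil {
        panic(err)
    }
    fmt.Println(ds.Namespace + "/" + ds.Name)
}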
14  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go  (generated, vendored)

@@ -26,7 +26,7 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    clientsetfake "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
    kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
    api "k8s.io/kubernetes/pkg/apis/core"
@@ -174,12 +174,12 @@ func TestEnsureProxyAddon(t *testing.T) {
    // Create a fake client and set up default test configuration
    client := clientsetfake.NewSimpleClientset()

    masterConfig := &kubeadmapiext.MasterConfiguration{
        API: kubeadmapiext.API{
    masterConfig := &kubeadmapiv1alpha2.MasterConfiguration{
        API: kubeadmapiv1alpha2.API{
            AdvertiseAddress: "1.2.3.4",
            BindPort:         1234,
        },
        KubeProxy: kubeadmapiext.KubeProxy{
        KubeProxy: kubeadmapiv1alpha2.KubeProxy{
            Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{
                BindAddress:        "",
                HealthzBindAddress: "0.0.0.0:10256",
@@ -193,11 +193,11 @@ func TestEnsureProxyAddon(t *testing.T) {
            },
        },
        },
        Networking: kubeadmapiext.Networking{
        Networking: kubeadmapiv1alpha2.Networking{
            PodSubnet: "5.6.7.8/24",
        },
        ImageRepository:          "someRepo",
        KubernetesVersion:        "v1.9.0",
        KubernetesVersion:        "v1.10.0",
        UnifiedControlPlaneImage: "someImage",
    }

@@ -214,7 +214,7 @@ func TestEnsureProxyAddon(t *testing.T) {
        masterConfig.Networking.PodSubnet = "2001:101::/96"
    }

    kubeadmapiext.SetDefaults_MasterConfiguration(masterConfig)
    kubeadmapiv1alpha2.SetDefaults_MasterConfiguration(masterConfig)
    intMaster, err := cmdutil.ConfigFileAndDefaultsToInternalConfig("", masterConfig)
    if err != nil {
        t.Errorf(" test failed to convert v1alpha1 to internal version")
1  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD  (generated, vendored)

@@ -26,6 +26,7 @@ go_library(
    deps = [
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//pkg/apis/rbac/v1:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go  (generated, vendored)

@@ -19,6 +19,7 @@ package clusterinfo
import (
    "fmt"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -39,8 +40,9 @@ const (
// CreateBootstrapConfigMapIfNotExists creates the kube-public ConfigMap if it doesn't exist already
func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string) error {

    fmt.Printf("[bootstraptoken] Creating the %q ConfigMap in the %q namespace\n", bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic)
    fmt.Printf("[bootstraptoken] creating the %q ConfigMap in the %q namespace\n", bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic)

    glog.V(1).Infoln("[bootstraptoken] loading admin kubeconfig")
    adminConfig, err := clientcmd.LoadFromFile(file)
    if err != nil {
        return fmt.Errorf("failed to load admin kubeconfig [%v]", err)
@@ -48,6 +50,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string

    adminCluster := adminConfig.Contexts[adminConfig.CurrentContext].Cluster
    // Copy the cluster from admin.conf to the bootstrap kubeconfig, contains the CA cert and the server URL
    glog.V(1).Infoln("[bootstraptoken] copying the cluster from admin.conf to the bootstrap kubeconfig")
    bootstrapConfig := &clientcmdapi.Config{
        Clusters: map[string]*clientcmdapi.Cluster{
            "": adminConfig.Clusters[adminCluster],
@@ -59,6 +62,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string
    }

    // Create or update the ConfigMap in the kube-public namespace
    glog.V(1).Infoln("[bootstraptoken] creating/updating ConfigMap in kube-public namespace")
    return apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name: bootstrapapi.ConfigMapClusterInfo,
@@ -72,6 +76,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string

// CreateClusterInfoRBACRules creates the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace to unauthenticated users
func CreateClusterInfoRBACRules(client clientset.Interface) error {
    glog.V(1).Infoln("creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace")
    err := apiclient.CreateOrUpdateRole(client, &rbac.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name: BootstrapSignerClusterRoleName,
13  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node/BUILD  (generated, vendored)

@@ -3,14 +3,6 @@ package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = ["token_test.go"],
    embed = [":go_default_library"],
    deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"],
)

go_library(
@@ -21,15 +13,12 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node",
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//cmd/kubeadm/app/util/token:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/bootstrap/token/util:go_default_library",
    ],
)
6  vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go  (generated, vendored)

@@ -47,7 +47,7 @@ const (

// AllowBootstrapTokensToPostCSRs creates RBAC rules in a way the makes Node Bootstrap Tokens able to post CSRs
func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
    fmt.Println("[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials")
    fmt.Println("[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials")

    return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{
@@ -69,7 +69,7 @@ func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error {

// AutoApproveNodeBootstrapTokens creates RBAC rules in a way that makes Node Bootstrap Tokens' CSR auto-approved by the csrapprover controller
func AutoApproveNodeBootstrapTokens(client clientset.Interface) error {
    fmt.Println("[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token")
    fmt.Println("[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token")

    // Always create this kubeadm-specific binding though
    return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
@@ -92,7 +92,7 @@ func AutoApproveNodeBootstrapTokens(client clientset.Interface) error {

// AutoApproveNodeCertificateRotation creates RBAC rules in a way that makes Node certificate rotation CSR auto-approved by the csrapprover controller
func AutoApproveNodeCertificateRotation(client clientset.Interface) error {
    fmt.Println("[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster")
    fmt.Println("[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster")

    return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{
114
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node/token.go
generated
vendored
114
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node/token.go
generated
vendored
@ -18,109 +18,43 @@ package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
|
||||
bootstraputil "k8s.io/client-go/tools/bootstrap/token/util"
|
||||
tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
|
||||
)
|
||||
|
||||
const tokenCreateRetries = 5
|
||||
// TODO(mattmoyer): Move CreateNewTokens, UpdateOrCreateTokens out of this package to client-go for a generic abstraction and client for a Bootstrap Token
|
||||
|
||||
// TODO(mattmoyer): Move CreateNewToken, UpdateOrCreateToken and encodeTokenSecretData out of this package to client-go for a generic abstraction and client for a Bootstrap Token
|
||||
|
||||
// CreateNewToken tries to create a token and fails if one with the same ID already exists
|
||||
func CreateNewToken(client clientset.Interface, token string, tokenDuration time.Duration, usages []string, extraGroups []string, description string) error {
|
||||
return UpdateOrCreateToken(client, token, true, tokenDuration, usages, extraGroups, description)
|
||||
// CreateNewTokens tries to create a token and fails if one with the same ID already exists
|
||||
func CreateNewTokens(client clientset.Interface, tokens []kubeadmapi.BootstrapToken) error {
|
||||
return UpdateOrCreateTokens(client, true, tokens)
|
||||
}
|
||||
|
||||
// UpdateOrCreateToken attempts to update a token with the given ID, or create if it does not already exist.
|
||||
func UpdateOrCreateToken(client clientset.Interface, token string, failIfExists bool, tokenDuration time.Duration, usages []string, extraGroups []string, description string) error {
|
||||
tokenID, tokenSecret, err := tokenutil.ParseToken(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
	secretName := fmt.Sprintf("%s%s", bootstrapapi.BootstrapTokenSecretPrefix, tokenID)

	var lastErr error
	for i := 0; i < tokenCreateRetries; i++ {
		secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
		if err == nil {
			if failIfExists {
				return fmt.Errorf("a token with id %q already exists", tokenID)
			}
			// Secret with this ID already exists, update it:
			tokenSecretData, err := encodeTokenSecretData(tokenID, tokenSecret, tokenDuration, usages, extraGroups, description)
			if err != nil {
				return err
			}
			secret.Data = tokenSecretData
			if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Update(secret); err == nil {
				return nil
			}
			lastErr = err
			continue
		}

		// Secret does not already exist:
		if apierrors.IsNotFound(err) {
			tokenSecretData, err := encodeTokenSecretData(tokenID, tokenSecret, tokenDuration, usages, extraGroups, description)
			if err != nil {
				return err
			}

			secret = &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: secretName,
				},
				Type: v1.SecretType(bootstrapapi.SecretTypeBootstrapToken),
				Data: tokenSecretData,
			}
			if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err == nil {
				return nil
			}
			lastErr = err
			continue
		}
	}
	return fmt.Errorf(
		"unable to create bootstrap token after %d attempts [%v]",
		tokenCreateRetries,
		lastErr,
	)
}

// UpdateOrCreateTokens attempts to update a token with the given ID, or create if it does not already exist.
func UpdateOrCreateTokens(client clientset.Interface, failIfExists bool, tokens []kubeadmapi.BootstrapToken) error {

	for _, token := range tokens {

		secretName := bootstraputil.BootstrapTokenSecretName(token.Token.ID)
		secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
		if secret != nil && err == nil && failIfExists {
			return fmt.Errorf("a token with id %q already exists", token.Token.ID)
		}

		updatedOrNewSecret := token.ToSecret()
		// Try to create or update the token with an exponential backoff
		err = apiclient.TryRunCommand(func() error {
			if err := apiclient.CreateOrUpdateSecret(client, updatedOrNewSecret); err != nil {
				return fmt.Errorf("failed to create or update bootstrap token with name %s: %v", secretName, err)
			}
			return nil
		}, 5)
		if err != nil {
			return err
		}
	}
	return nil
}

// encodeTokenSecretData takes the token discovery object and an optional duration and returns the .Data for the Secret
func encodeTokenSecretData(tokenID, tokenSecret string, duration time.Duration, usages []string, extraGroups []string, description string) (map[string][]byte, error) {
	data := map[string][]byte{
		bootstrapapi.BootstrapTokenIDKey:     []byte(tokenID),
		bootstrapapi.BootstrapTokenSecretKey: []byte(tokenSecret),
	}

	if len(extraGroups) > 0 {
		data[bootstrapapi.BootstrapTokenExtraGroupsKey] = []byte(strings.Join(extraGroups, ","))
	}

	if duration > 0 {
		// Get the current time, add the specified duration, and format it accordingly
		durationString := time.Now().Add(duration).Format(time.RFC3339)
		data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(durationString)
	}
	if len(description) > 0 {
		data[bootstrapapi.BootstrapTokenDescriptionKey] = []byte(description)
	}

	// validate usages
	if err := bootstraputil.ValidateUsages(usages); err != nil {
		return nil, err
	}
	for _, usage := range usages {
		data[bootstrapapi.BootstrapTokenUsagePrefix+usage] = []byte("true")
	}
	return data, nil
}
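As an aside, here is a minimal sketch of how the new UpdateOrCreateTokens entry point could be driven against a fake clientset. The BootstrapTokenString literal is an assumption inferred from the token.Token.ID accesses above; it is not code from this commit.

package main

import (
	"fmt"

	clientsetfake "k8s.io/client-go/kubernetes/fake"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
)

func main() {
	// A fake clientset stands in for a real cluster connection.
	client := clientsetfake.NewSimpleClientset()

	// token.Token carries the ID/secret pair; the secret name is derived from the ID.
	tokens := []kubeadmapi.BootstrapToken{
		{Token: &kubeadmapi.BootstrapTokenString{ID: "abcdef", Secret: "0123456789abcdef"}},
	}

	// failIfExists=false lets an existing token secret be updated in place.
	if err := node.UpdateOrCreateTokens(client, false, tokens); err != nil {
		fmt.Println("error:", err)
	}
}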
59
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node/token_test.go
generated
vendored
@ -1,59 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"bytes"
	"testing"
	"time"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)

func TestEncodeTokenSecretData(t *testing.T) {
	var tests = []struct {
		token *kubeadmapi.TokenDiscovery
		t     time.Duration
	}{
		{token: &kubeadmapi.TokenDiscovery{ID: "foo", Secret: "bar"}},                 // should use default
		{token: &kubeadmapi.TokenDiscovery{ID: "foo", Secret: "bar"}, t: time.Second}, // should use default
	}
	for _, rt := range tests {
		actual, _ := encodeTokenSecretData(rt.token.ID, rt.token.Secret, rt.t, []string{}, []string{}, "")
		if !bytes.Equal(actual["token-id"], []byte(rt.token.ID)) {
			t.Errorf(
				"failed EncodeTokenSecretData:\n\texpected: %s\n\t actual: %s",
				rt.token.ID,
				actual["token-id"],
			)
		}
		if !bytes.Equal(actual["token-secret"], []byte(rt.token.Secret)) {
			t.Errorf(
				"failed EncodeTokenSecretData:\n\texpected: %s\n\t actual: %s",
				rt.token.Secret,
				actual["token-secret"],
			)
		}
		if rt.t > 0 {
			if actual["expiration"] == nil {
				t.Errorf(
					"failed EncodeTokenSecretData, duration was not added to time",
				)
			}
		}
	}
}
1
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/BUILD
generated
vendored
@ -30,6 +30,7 @@ go_library(
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
    ],
)
188
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs.go
generated
vendored
@ -23,6 +23,8 @@ import (
	"os"
	"path/filepath"

	"github.com/golang/glog"

	certutil "k8s.io/client-go/util/cert"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -32,18 +34,26 @@ import (
// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane.
// If the PKI assets already exists in the target folder, they are used only if evaluated equal; otherwise an error is returned.
func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating PKI assets")
	certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
		CreateCACertAndKeyfiles,
		CreateCACertAndKeyFiles,
		CreateAPIServerCertAndKeyFiles,
		CreateAPIServerKubeletClientCertAndKeyFiles,
		CreateEtcdServerCertAndKeyFiles,
		CreateEtcdPeerCertAndKeyFiles,
		CreateAPIServerEtcdClientCertAndKeyFiles,
		CreateServiceAccountKeyAndPublicKeyFiles,
		CreateFrontProxyCACertAndKeyFiles,
		CreateFrontProxyClientCertAndKeyFiles,
	}
	etcdCertActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
		CreateEtcdCACertAndKeyFiles,
		CreateEtcdServerCertAndKeyFiles,
		CreateEtcdPeerCertAndKeyFiles,
		CreateEtcdHealthcheckClientCertAndKeyFiles,
		CreateAPIServerEtcdClientCertAndKeyFiles,
	}

	if cfg.Etcd.Local != nil {
		certActions = append(certActions, etcdCertActions...)
	}

	for _, action := range certActions {
		err := action(cfg)
@ -52,15 +62,15 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error {
		}
	}

	fmt.Printf("[certificates] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir)
	fmt.Printf("[certificates] valid certificates and keys now exist in %q\n", cfg.CertificatesDir)

	return nil
}

// CreateCACertAndKeyfiles create a new self signed CA certificate and key files.
// CreateCACertAndKeyFiles create a new self signed cluster CA certificate and key files.
// If the CA certificate and key files already exists in the target folder, they are used only if evaluated equal; otherwise an error is returned.
func CreateCACertAndKeyfiles(cfg *kubeadmapi.MasterConfiguration) error {
func CreateCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("create a new self signed cluster CA certificate and key files")
	caCert, caKey, err := NewCACertAndKey()
	if err != nil {
		return err
@ -76,9 +86,9 @@ func CreateCACertAndKeyfiles(cfg *kubeadmapi.MasterConfiguration) error {

// CreateAPIServerCertAndKeyFiles create a new certificate and key files for the apiserver.
// If the apiserver certificate and key files already exists in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key files should exists into the CertificatesDir
// It assumes the cluster CA certificate and key files exist in the CertificatesDir.
func CreateAPIServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a new certificate and key files for the apiserver")
	caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
	if err != nil {
		return err
@ -98,11 +108,11 @@ func CreateAPIServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	)
}

// CreateAPIServerKubeletClientCertAndKeyFiles create a new CA certificate for kubelets calling apiserver
// CreateAPIServerKubeletClientCertAndKeyFiles create a new certificate for kubelets calling apiserver.
// If the apiserver-kubelet-client certificate and key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
// It assumes the cluster CA certificate and key files should exists into the CertificatesDir
// It assumes the cluster CA certificate and key files exist in the CertificatesDir.
func CreateAPIServerKubeletClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a new certificate for kubelets calling apiserver")
	caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
	if err != nil {
		return err
@ -122,17 +132,36 @@ func CreateAPIServerKubeletClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfigura
	)
}

// CreateEtcdServerCertAndKeyFiles create a new certificate and key file for etcd.
// If the etcd serving certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key file exist in the CertificatesDir
func CreateEtcdServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {

	caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
	if err != nil {
		return err
	}

	etcdServerCert, etcdServerKey, err := NewEtcdServerCertAndKey(cfg, caCert, caKey)
// CreateEtcdCACertAndKeyFiles create a self signed etcd CA certificate and key files.
// The etcd CA and client certs are used to secure communication between etcd peers and connections to etcd from the API server.
// This is a separate CA, so that kubernetes client identities cannot connect to etcd directly or peer with the etcd cluster.
// If the etcd CA certificate and key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
func CreateEtcdCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a self signed etcd CA certificate and key files")
	etcdCACert, etcdCAKey, err := NewEtcdCACertAndKey()
	if err != nil {
		return err
	}

	return writeCertificateAuthorithyFilesIfNotExist(
		cfg.CertificatesDir,
		kubeadmconstants.EtcdCACertAndKeyBaseName,
		etcdCACert,
		etcdCAKey,
	)
}

// CreateEtcdServerCertAndKeyFiles create a new certificate and key file for etcd.
// If the etcd serving certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the etcd CA certificate and key file exist in the CertificatesDir
func CreateEtcdServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a new server certificate and key files for etcd")
	etcdCACert, etcdCAKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.EtcdCACertAndKeyBaseName)
	if err != nil {
		return err
	}

	etcdServerCert, etcdServerKey, err := NewEtcdServerCertAndKey(cfg, etcdCACert, etcdCAKey)
	if err != nil {
		return err
	}
@ -140,7 +169,7 @@ func CreateEtcdServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error
	return writeCertificateFilesIfNotExist(
		cfg.CertificatesDir,
		kubeadmconstants.EtcdServerCertAndKeyBaseName,
		caCert,
		etcdCACert,
		etcdServerCert,
		etcdServerKey,
	)
@ -148,15 +177,15 @@ func CreateEtcdServerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error

// CreateEtcdPeerCertAndKeyFiles create a new certificate and key file for etcd peering.
// If the etcd peer certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key file exist in the CertificatesDir
// It assumes the etcd CA certificate and key file exist in the CertificatesDir
func CreateEtcdPeerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {

	caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
	glog.V(1).Infoln("creating a new certificate and key files for etcd peering")
	etcdCACert, etcdCAKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.EtcdCACertAndKeyBaseName)
	if err != nil {
		return err
	}

	etcdPeerCert, etcdPeerKey, err := NewEtcdPeerCertAndKey(cfg, caCert, caKey)
	etcdPeerCert, etcdPeerKey, err := NewEtcdPeerCertAndKey(cfg, etcdCACert, etcdCAKey)
	if err != nil {
		return err
	}
@ -164,23 +193,47 @@ func CreateEtcdPeerCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	return writeCertificateFilesIfNotExist(
		cfg.CertificatesDir,
		kubeadmconstants.EtcdPeerCertAndKeyBaseName,
		caCert,
		etcdCACert,
		etcdPeerCert,
		etcdPeerKey,
	)
}

// CreateAPIServerEtcdClientCertAndKeyFiles create a new client certificate for the apiserver calling etcd
// If the apiserver-etcd-client certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the cluster CA certificate and key file exist in the CertificatesDir
func CreateAPIServerEtcdClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {

	caCert, caKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
	if err != nil {
		return err
	}

	apiEtcdClientCert, apiEtcdClientKey, err := NewAPIServerEtcdClientCertAndKey(caCert, caKey)
// CreateEtcdHealthcheckClientCertAndKeyFiles create a new client certificate for liveness probes to healthcheck etcd
// If the etcd-healthcheck-client certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the etcd CA certificate and key file exist in the CertificatesDir
func CreateEtcdHealthcheckClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	etcdCACert, etcdCAKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.EtcdCACertAndKeyBaseName)
	if err != nil {
		return err
	}

	etcdHealthcheckClientCert, etcdHealthcheckClientKey, err := NewEtcdHealthcheckClientCertAndKey(etcdCACert, etcdCAKey)
	if err != nil {
		return err
	}

	return writeCertificateFilesIfNotExist(
		cfg.CertificatesDir,
		kubeadmconstants.EtcdHealthcheckClientCertAndKeyBaseName,
		etcdCACert,
		etcdHealthcheckClientCert,
		etcdHealthcheckClientKey,
	)
}

// CreateAPIServerEtcdClientCertAndKeyFiles create a new client certificate for the apiserver calling etcd
// If the apiserver-etcd-client certificate and key file already exist in the target folder, they are used only if evaluated equal; otherwise an error is returned.
// It assumes the etcd CA certificate and key file exist in the CertificatesDir
func CreateAPIServerEtcdClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a new client certificate for the apiserver calling etcd")
	etcdCACert, etcdCAKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.EtcdCACertAndKeyBaseName)
	if err != nil {
		return err
	}

	apiEtcdClientCert, apiEtcdClientKey, err := NewAPIServerEtcdClientCertAndKey(etcdCACert, etcdCAKey)
	if err != nil {
		return err
	}
@ -188,7 +241,7 @@ func CreateAPIServerEtcdClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguratio
	return writeCertificateFilesIfNotExist(
		cfg.CertificatesDir,
		kubeadmconstants.APIServerEtcdClientCertAndKeyBaseName,
		caCert,
		etcdCACert,
		apiEtcdClientCert,
		apiEtcdClientKey,
	)
@ -197,7 +250,7 @@ func CreateAPIServerEtcdClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguratio
// CreateServiceAccountKeyAndPublicKeyFiles create a new public/private key files for signing service account users.
// If the sa public/private key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a new public/private key files for signing service account users")
	saSigningKey, err := NewServiceAccountSigningKey()
	if err != nil {
		return err
@ -216,7 +269,7 @@ func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.MasterConfiguratio
// as front proxies.
// If the front proxy CA certificate and key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
func CreateFrontProxyCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a self signed front proxy CA certificate and key files")
	frontProxyCACert, frontProxyCAKey, err := NewFrontProxyCACertAndKey()
	if err != nil {
		return err
@ -232,9 +285,9 @@ func CreateFrontProxyCACertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) erro

// CreateFrontProxyClientCertAndKeyFiles create a new certificate for proxy server client.
// If the front-proxy-client certificate and key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
// It assumes the front proxy CAA certificate and key files should exists into the CertificatesDir
// It assumes the front proxy CA certificate and key files exist in the CertificatesDir.
func CreateFrontProxyClientCertAndKeyFiles(cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating a new certificate for proxy server client")
	frontProxyCACert, frontProxyCAKey, err := loadCertificateAuthority(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName)
	if err != nil {
		return err
@ -265,7 +318,7 @@ func NewCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
	return caCert, caKey, nil
}

// NewAPIServerCertAndKey generate CA certificate for apiserver, signed by the given CA.
// NewAPIServerCertAndKey generate certificate for apiserver, signed by the given CA.
func NewAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	altNames, err := pkiutil.GetAPIServerAltNames(cfg)
@ -286,7 +339,7 @@ func NewAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Ce
	return apiCert, apiKey, nil
}

// NewAPIServerKubeletClientCertAndKey generate CA certificate for the apiservers to connect to the kubelets securely, signed by the given CA.
// NewAPIServerKubeletClientCertAndKey generate certificate for the apiservers to connect to the kubelets securely, signed by the given CA.
func NewAPIServerKubeletClientCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	config := certutil.Config{
@ -302,7 +355,18 @@ func NewAPIServerKubeletClientCertAndKey(caCert *x509.Certificate, caKey *rsa.Pr
	return apiClientCert, apiClientKey, nil
}

// NewEtcdServerCertAndKey generate CA certificate for etcd, signed by the given CA.
// NewEtcdCACertAndKey generate a self signed etcd CA.
func NewEtcdCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {

	etcdCACert, etcdCAKey, err := pkiutil.NewCertificateAuthority()
	if err != nil {
		return nil, nil, fmt.Errorf("failure while generating etcd CA certificate and key: %v", err)
	}

	return etcdCACert, etcdCAKey, nil
}

// NewEtcdServerCertAndKey generate certificate for etcd, signed by the given CA.
func NewEtcdServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	altNames, err := pkiutil.GetEtcdAltNames(cfg)
@ -311,9 +375,9 @@ func NewEtcdServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.C
	}

	config := certutil.Config{
		CommonName: kubeadmconstants.EtcdServerCertCommonName,
		CommonName: cfg.NodeRegistration.Name,
		AltNames:   *altNames,
		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	}
	etcdServerCert, etcdServerKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
	if err != nil {
@ -323,7 +387,7 @@ func NewEtcdServerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.C
	return etcdServerCert, etcdServerKey, nil
}

// NewEtcdPeerCertAndKey generate CA certificate for etcd peering, signed by the given CA.
// NewEtcdPeerCertAndKey generate certificate for etcd peering, signed by the given CA.
func NewEtcdPeerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	altNames, err := pkiutil.GetEtcdPeerAltNames(cfg)
@ -332,7 +396,7 @@ func NewEtcdPeerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Cer
	}

	config := certutil.Config{
		CommonName: kubeadmconstants.EtcdPeerCertCommonName,
		CommonName: cfg.NodeRegistration.Name,
		AltNames:   *altNames,
		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	}
@ -344,7 +408,23 @@ func NewEtcdPeerCertAndKey(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Cer
	return etcdPeerCert, etcdPeerKey, nil
}

// NewAPIServerEtcdClientCertAndKey generate CA certificate for the apiservers to connect to etcd securely, signed by the given CA.
// NewEtcdHealthcheckClientCertAndKey generate certificate for liveness probes to healthcheck etcd, signed by the given CA.
func NewEtcdHealthcheckClientCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	config := certutil.Config{
		CommonName:   kubeadmconstants.EtcdHealthcheckClientCertCommonName,
		Organization: []string{kubeadmconstants.MastersGroup},
		Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	etcdHealcheckClientCert, etcdHealcheckClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
	if err != nil {
		return nil, nil, fmt.Errorf("failure while creating etcd healthcheck client key and certificate: %v", err)
	}

	return etcdHealcheckClientCert, etcdHealcheckClientKey, nil
}

// NewAPIServerEtcdClientCertAndKey generate certificate for the apiservers to connect to etcd securely, signed by the given CA.
func NewAPIServerEtcdClientCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	config := certutil.Config{
@ -383,7 +463,7 @@ func NewFrontProxyCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
	return frontProxyCACert, frontProxyCAKey, nil
}

// NewFrontProxyClientCertAndKey generate CA certificate for proxy server client, signed by the given front proxy CA.
// NewFrontProxyClientCertAndKey generate certificate for proxy server client, signed by the given front proxy CA.
func NewFrontProxyClientCertAndKey(frontProxyCACert *x509.Certificate, frontProxyCAKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {

	config := certutil.Config{
@ -537,8 +617,9 @@ type certKeyLocation struct {
	uxName string
}

// UsingExternalCA determines whether the user is relying on an external CA. We currently implicitly determine this is the case when the CA Cert
// is present but the CA Key is not. This allows us to, e.g., skip generating certs or not start the csr signing controller.
// UsingExternalCA determines whether the user is relying on an external CA. We currently implicitly determine this is the case
// when both the CA Cert and the front proxy CA Cert are present but the CA Key and front proxy CA Key are not.
// This allows us to, e.g., skip generating certs or not start the csr signing controller.
func UsingExternalCA(cfg *kubeadmapi.MasterConfiguration) (bool, error) {

	if err := validateCACert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, "", "CA"}); err != nil {
@ -547,7 +628,7 @@ func UsingExternalCA(cfg *kubeadmapi.MasterConfiguration) (bool, error) {

	caKeyPath := filepath.Join(cfg.CertificatesDir, kubeadmconstants.CAKeyName)
	if _, err := os.Stat(caKeyPath); !os.IsNotExist(err) {
		return false, fmt.Errorf("ca.key exists")
		return false, fmt.Errorf("%s exists", kubeadmconstants.CAKeyName)
	}

	if err := validateSignedCert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, kubeadmconstants.APIServerCertAndKeyBaseName, "API server"}); err != nil {
@ -562,10 +643,15 @@ func UsingExternalCA(cfg *kubeadmapi.MasterConfiguration) (bool, error) {
		return false, err
	}

	if err := validateCACertAndKey(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName, "", "front-proxy CA"}); err != nil {
	if err := validateCACert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName, "", "front-proxy CA"}); err != nil {
		return false, err
	}

	frontProxyCAKeyPath := filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyCAKeyName)
	if _, err := os.Stat(frontProxyCAKeyPath); !os.IsNotExist(err) {
		return false, fmt.Errorf("%s exists", kubeadmconstants.FrontProxyCAKeyName)
	}

	if err := validateSignedCert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertAndKeyBaseName, kubeadmconstants.FrontProxyClientCertAndKeyBaseName, "front-proxy client"}); err != nil {
		return false, err
	}
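For intuition, here is a standalone sketch (not from this commit) of the presence/absence heuristic that UsingExternalCA applies to both the cluster CA and, with this change, the front proxy CA; the file names passed in main are the usual kubeadm defaults and are assumptions here.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// usingExternalCAHint reports true when a CA certificate exists on disk
// but its private key does not, i.e. signing happens outside the cluster.
func usingExternalCAHint(certsDir, certName, keyName string) (bool, error) {
	if _, err := os.Stat(filepath.Join(certsDir, certName)); err != nil {
		return false, fmt.Errorf("%s missing: %v", certName, err)
	}
	if _, err := os.Stat(filepath.Join(certsDir, keyName)); os.IsNotExist(err) {
		return true, nil // cert without key: an external CA holds the key
	}
	return false, nil
}

func main() {
	external, err := usingExternalCAHint("/etc/kubernetes/pki", "ca.crt", "ca.key")
	fmt.Println(external, err)
}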
141
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs_test.go
generated
vendored
@ -273,9 +273,9 @@ func TestNewAPIServerCertAndKey(t *testing.T) {
	advertiseAddresses := []string{"1.2.3.4", "1:2:3::4"}
	for _, addr := range advertiseAddresses {
		cfg := &kubeadmapi.MasterConfiguration{
			API:        kubeadmapi.API{AdvertiseAddress: addr},
			Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeName:   hostname,
			API:              kubeadmapi.API{AdvertiseAddress: addr},
			Networking:       kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: hostname},
		}
		caCert, caKey, err := NewCACertAndKey()
		if err != nil {
@ -310,15 +310,29 @@ func TestNewAPIServerKubeletClientCertAndKey(t *testing.T) {
	certstestutil.AssertCertificateHasOrganizations(t, apiKubeletClientCert, kubeadmconstants.MastersGroup)
}

func TestNewEtcdCACertAndKey(t *testing.T) {
	etcdCACert, _, err := NewEtcdCACertAndKey()
	if err != nil {
		t.Fatalf("failed creation of cert and key: %v", err)
	}

	certstestutil.AssertCertificateIsCa(t, etcdCACert)
}

func TestNewEtcdServerCertAndKey(t *testing.T) {
	proxy := "user-etcd-proxy"
	proxyIP := "10.10.10.100"

	cfg := &kubeadmapi.MasterConfiguration{
		NodeRegistration: kubeadmapi.NodeRegistrationOptions{
			Name: "etcd-server-cert",
		},
		Etcd: kubeadmapi.Etcd{
			ServerCertSANs: []string{
				proxy,
				proxyIP,
			Local: &kubeadmapi.LocalEtcd{
				ServerCertSANs: []string{
					proxy,
					proxyIP,
				},
			},
		},
	}
@ -346,12 +360,14 @@ func TestNewEtcdPeerCertAndKey(t *testing.T) {
	advertiseAddresses := []string{"1.2.3.4", "1:2:3::4"}
	for _, addr := range advertiseAddresses {
		cfg := &kubeadmapi.MasterConfiguration{
			API:      kubeadmapi.API{AdvertiseAddress: addr},
			NodeName: hostname,
			API:              kubeadmapi.API{AdvertiseAddress: addr},
			NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: hostname},
			Etcd: kubeadmapi.Etcd{
				PeerCertSANs: []string{
					proxy,
					proxyIP,
				Local: &kubeadmapi.LocalEtcd{
					PeerCertSANs: []string{
						proxy,
						proxyIP,
					},
				},
			},
		}
@ -373,6 +389,22 @@ func TestNewEtcdPeerCertAndKey(t *testing.T) {
	}
}

func TestNewEtcdHealthcheckClientCertAndKey(t *testing.T) {
	caCert, caKey, err := NewCACertAndKey()
	if err != nil {
		t.Fatalf("failed creation of ca cert and key: %v", err)
	}

	etcdHealthcheckClientCert, _, err := NewEtcdHealthcheckClientCertAndKey(caCert, caKey)
	if err != nil {
		t.Fatalf("failed creation of cert and key: %v", err)
	}

	certstestutil.AssertCertificateIsSignedByCa(t, etcdHealthcheckClientCert, caCert)
	certstestutil.AssertCertificateHasClientAuthUsage(t, etcdHealthcheckClientCert)
	certstestutil.AssertCertificateHasOrganizations(t, etcdHealthcheckClientCert, kubeadmconstants.MastersGroup)
}

func TestNewAPIServerEtcdClientCertAndKey(t *testing.T) {
	caCert, caKey, err := NewCACertAndKey()
	if err != nil {
@ -441,6 +473,7 @@ func TestUsingExternalCA(t *testing.T) {
			setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
				CreatePKIAssets,
				deleteCAKey,
				deleteFrontProxyCAKey,
			},
			expected: true,
		},
@ -451,10 +484,10 @@ func TestUsingExternalCA(t *testing.T) {
		defer os.RemoveAll(dir)

		cfg := &kubeadmapi.MasterConfiguration{
			API:             kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
			Networking:      kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeName:        "valid-hostname",
			CertificatesDir: dir,
			API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
			Networking:       kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"},
			CertificatesDir:  dir,
		}

		for _, f := range test.setupFuncs {
@ -481,7 +514,7 @@ func TestValidateMethods(t *testing.T) {
		{
			name: "validateCACert",
			setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
				CreateCACertAndKeyfiles,
				CreateCACertAndKeyFiles,
			},
			validateFunc: validateCACert,
			loc:          certKeyLocation{caBaseName: "ca", baseName: "", uxName: "CA"},
@ -490,7 +523,7 @@ func TestValidateMethods(t *testing.T) {
		{
			name: "validateCACertAndKey (files present)",
			setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
				CreateCACertAndKeyfiles,
				CreateCACertAndKeyFiles,
			},
			validateFunc: validateCACertAndKey,
			loc:          certKeyLocation{caBaseName: "ca", baseName: "", uxName: "CA"},
@ -509,7 +542,7 @@ func TestValidateMethods(t *testing.T) {
		{
			name: "validateSignedCert",
			setupFuncs: []func(cfg *kubeadmapi.MasterConfiguration) error{
				CreateCACertAndKeyfiles,
				CreateCACertAndKeyFiles,
				CreateAPIServerCertAndKeyFiles,
			},
			validateFunc: validateSignedCert,
@ -534,10 +567,10 @@ func TestValidateMethods(t *testing.T) {
		test.loc.pkiDir = dir

		cfg := &kubeadmapi.MasterConfiguration{
			API:             kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
			Networking:      kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeName:        "valid-hostname",
			CertificatesDir: dir,
			API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
			Networking:       kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"},
			CertificatesDir:  dir,
		}

		fmt.Println("Testing", test.name)
@ -558,16 +591,17 @@ func TestValidateMethods(t *testing.T) {
}

func deleteCAKey(cfg *kubeadmapi.MasterConfiguration) error {
	if err := os.Remove(filepath.Join(cfg.CertificatesDir, "ca.key")); err != nil {
		return fmt.Errorf("failed removing ca.key: %v", err)
	if err := os.Remove(filepath.Join(cfg.CertificatesDir, kubeadmconstants.CAKeyName)); err != nil {
		return fmt.Errorf("failed removing %s: %v", kubeadmconstants.CAKeyName, err)
	}
	return nil
}

func assertIsCa(t *testing.T, cert *x509.Certificate) {
	if !cert.IsCA {
		t.Error("cert is not a valida CA")
func deleteFrontProxyCAKey(cfg *kubeadmapi.MasterConfiguration) error {
	if err := os.Remove(filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyCAKeyName)); err != nil {
		return fmt.Errorf("failed removing %s: %v", kubeadmconstants.FrontProxyCAKeyName, err)
	}
	return nil
}

func TestCreateCertificateFilesMethods(t *testing.T) {
@ -576,6 +610,7 @@ func TestCreateCertificateFilesMethods(t *testing.T) {
		setupFunc     func(cfg *kubeadmapi.MasterConfiguration) error
		createFunc    func(cfg *kubeadmapi.MasterConfiguration) error
		expectedFiles []string
		externalEtcd  bool
	}{
		{
			createFunc: CreatePKIAssets,
@ -583,8 +618,10 @@ func TestCreateCertificateFilesMethods(t *testing.T) {
				kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
				kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
				kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
				kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName,
				kubeadmconstants.EtcdServerCertName, kubeadmconstants.EtcdServerKeyName,
				kubeadmconstants.EtcdPeerCertName, kubeadmconstants.EtcdPeerKeyName,
				kubeadmconstants.EtcdHealthcheckClientCertName, kubeadmconstants.EtcdHealthcheckClientKeyName,
				kubeadmconstants.APIServerEtcdClientCertName, kubeadmconstants.APIServerEtcdClientKeyName,
				kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName,
				kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName,
@ -592,31 +629,52 @@ func TestCreateCertificateFilesMethods(t *testing.T) {
			},
		},
		{
			createFunc: CreateCACertAndKeyfiles,
			createFunc:   CreatePKIAssets,
			externalEtcd: true,
			expectedFiles: []string{
				kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
				kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
				kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
				kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName,
				kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName,
				kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName,
			},
		},
		{
			createFunc:    CreateCACertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.CACertName, kubeadmconstants.CAKeyName},
		},
		{
			setupFunc:     CreateCACertAndKeyfiles,
			setupFunc:     CreateCACertAndKeyFiles,
			createFunc:    CreateAPIServerCertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName},
		},
		{
			setupFunc:     CreateCACertAndKeyfiles,
			setupFunc:     CreateCACertAndKeyFiles,
			createFunc:    CreateAPIServerKubeletClientCertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName},
		},
		{
			setupFunc:     CreateCACertAndKeyfiles,
			createFunc:    CreateEtcdCACertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName},
		},
		{
			setupFunc:     CreateEtcdCACertAndKeyFiles,
			createFunc:    CreateEtcdServerCertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.EtcdServerCertName, kubeadmconstants.EtcdServerKeyName},
		},
		{
			setupFunc:     CreateCACertAndKeyfiles,
			setupFunc:     CreateEtcdCACertAndKeyFiles,
			createFunc:    CreateEtcdPeerCertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.EtcdPeerCertName, kubeadmconstants.EtcdPeerKeyName},
		},
		{
			setupFunc:     CreateCACertAndKeyfiles,
			setupFunc:     CreateEtcdCACertAndKeyFiles,
			createFunc:    CreateEtcdHealthcheckClientCertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.EtcdHealthcheckClientCertName, kubeadmconstants.EtcdHealthcheckClientKeyName},
		},
		{
			setupFunc:     CreateEtcdCACertAndKeyFiles,
			createFunc:    CreateAPIServerEtcdClientCertAndKeyFiles,
			expectedFiles: []string{kubeadmconstants.APIServerEtcdClientCertName, kubeadmconstants.APIServerEtcdClientKeyName},
		},
@ -641,10 +699,19 @@ func TestCreateCertificateFilesMethods(t *testing.T) {
		defer os.RemoveAll(tmpdir)

		cfg := &kubeadmapi.MasterConfiguration{
			API:             kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
			Networking:      kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeName:        "valid-hostname",
			CertificatesDir: tmpdir,
			API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
			Etcd:             kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{}},
			Networking:       kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
			NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"},
			CertificatesDir:  tmpdir,
		}

		if test.externalEtcd {
			if cfg.Etcd.External == nil {
				cfg.Etcd.External = &kubeadmapi.ExternalEtcd{}
			}
			cfg.Etcd.Local = nil
			cfg.Etcd.External.Endpoints = []string{"192.168.1.1:2379"}
		}

		// executes setup func (if necessary)
8
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/doc.go
generated
vendored
@ -24,8 +24,8 @@ package certs
	From MasterConfiguration
		.API.AdvertiseAddress is an optional parameter that can be passed for an extra addition to the SAN IPs
		.APIServerCertSANs is an optional parameter for adding DNS names and IPs to the API Server serving cert SAN
		.Etcd.ServerCertSANs is an optional parameter for adding DNS names and IPs to the etcd serving cert SAN
		.Etcd.PeerCertSANs is an optional parameter for adding DNS names and IPs to the etcd peer cert SAN
		.Etcd.Local.ServerCertSANs is an optional parameter for adding DNS names and IPs to the etcd serving cert SAN
		.Etcd.Local.PeerCertSANs is an optional parameter for adding DNS names and IPs to the etcd peer cert SAN
		.Networking.DNSDomain is needed for knowing which DNS name the internal kubernetes service has
		.Networking.ServiceSubnet is needed for knowing which IP the internal kubernetes service is going to point to
		.CertificatesDir is required for knowing where all certificates should be stored
@ -40,10 +40,14 @@ package certs
	- apiserver-kubelet-client.key
	- apiserver-etcd-client.crt
	- apiserver-etcd-client.key
	- etcd/ca.crt
	- etcd/ca.key
	- etcd/server.crt
	- etcd/server.key
	- etcd/peer.crt
	- etcd/peer.key
	- etcd/healthcheck-client.crt
	- etcd/healthcheck-client.key
	- sa.pub
	- sa.key
	- front-proxy-ca.crt
1
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil/BUILD
generated
vendored
@ -23,6 +23,7 @@ go_library(
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//pkg/registry/core/service/ipallocator:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
31
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go
generated
vendored
@ -29,6 +29,7 @@ import (
	certutil "k8s.io/client-go/util/cert"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)

@ -274,7 +275,7 @@ func GetAPIServerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNam
	// create AltNames with defaults DNSNames/IPs
	altNames := &certutil.AltNames{
		DNSNames: []string{
			cfg.NodeName,
			cfg.NodeRegistration.Name,
			"kubernetes",
			"kubernetes.default",
			"kubernetes.default.svc",
@ -286,9 +287,17 @@ func GetAPIServerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNam
		},
	}

	// add api server dns advertise address
	// add api server controlPlaneEndpoint if present (dns or ip)
	if len(cfg.API.ControlPlaneEndpoint) > 0 {
		altNames.DNSNames = append(altNames.DNSNames, cfg.API.ControlPlaneEndpoint)
		if host, _, err := kubeadmutil.ParseHostPort(cfg.API.ControlPlaneEndpoint); err == nil {
			if ip := net.ParseIP(host); ip != nil {
				altNames.IPs = append(altNames.IPs, ip)
			} else {
				altNames.DNSNames = append(altNames.DNSNames, host)
			}
		} else {
			return nil, fmt.Errorf("error parsing API api.controlPlaneEndpoint %q: %s", cfg.API.ControlPlaneEndpoint, err)
		}
	}

	appendSANsToAltNames(altNames, cfg.APIServerCertSANs, kubeadmconstants.APIServerCertName)
@ -303,11 +312,13 @@ func GetAPIServerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNam
func GetEtcdAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, error) {
	// create AltNames with defaults DNSNames/IPs
	altNames := &certutil.AltNames{
		DNSNames: []string{"localhost"},
		IPs:      []net.IP{net.IPv4(127, 0, 0, 1)},
		DNSNames: []string{cfg.NodeRegistration.Name, "localhost"},
		IPs:      []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
	}

	appendSANsToAltNames(altNames, cfg.Etcd.ServerCertSANs, kubeadmconstants.EtcdServerCertName)
	if cfg.Etcd.Local != nil {
		appendSANsToAltNames(altNames, cfg.Etcd.Local.ServerCertSANs, kubeadmconstants.EtcdServerCertName)
	}

	return altNames, nil
}
@ -325,11 +336,13 @@ func GetEtcdPeerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltName

	// create AltNames with defaults DNSNames/IPs
	altNames := &certutil.AltNames{
		DNSNames: []string{cfg.NodeName},
		IPs:      []net.IP{advertiseAddress},
		DNSNames: []string{cfg.NodeRegistration.Name, "localhost"},
		IPs:      []net.IP{advertiseAddress, net.IPv4(127, 0, 0, 1), net.IPv6loopback},
	}

	appendSANsToAltNames(altNames, cfg.Etcd.PeerCertSANs, kubeadmconstants.EtcdPeerCertName)
	if cfg.Etcd.Local != nil {
		appendSANsToAltNames(altNames, cfg.Etcd.Local.PeerCertSANs, kubeadmconstants.EtcdPeerCertName)
	}

	return altNames, nil
}
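A small self-contained sketch of the IP-versus-DNS classification the SAN helpers above rely on: net.ParseIP decides which list a candidate lands in. The function name is illustrative only and does not appear in this commit.

package main

import (
	"fmt"
	"net"
)

// splitSANs mimics the classification used when building certutil.AltNames:
// anything net.ParseIP accepts becomes an IP SAN, the rest are DNS SANs.
func splitSANs(sans []string) (ips []net.IP, dnsNames []string) {
	for _, san := range sans {
		if ip := net.ParseIP(san); ip != nil {
			ips = append(ips, ip)
		} else {
			dnsNames = append(dnsNames, san)
		}
	}
	return ips, dnsNames
}

func main() {
	ips, names := splitSANs([]string{"10.1.245.94", "etcd-0.local", "localhost"})
	fmt.Println(ips, names)
}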
119
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go
generated
vendored
@ -436,48 +436,69 @@ func TestPathForPublicKey(t *testing.T) {
}

func TestGetAPIServerAltNames(t *testing.T) {
	hostname := "valid-hostname"
	advertiseIP := "1.2.3.4"
	controlPlaneEndpoint := "api.k8s.io"
	cfg := &kubeadmapi.MasterConfiguration{
		API:               kubeadmapi.API{AdvertiseAddress: advertiseIP, ControlPlaneEndpoint: controlPlaneEndpoint},
		Networking:        kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
		NodeName:          hostname,
		APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95", "1.2.3.L", "invalid,commas,in,DNS"},

	var tests = []struct {
		name                string
		cfg                 *kubeadmapi.MasterConfiguration
		expectedDNSNames    []string
		expectedIPAddresses []string
	}{
		{
			name: "ControlPlaneEndpoint DNS",
			cfg: &kubeadmapi.MasterConfiguration{
				API:               kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:6443"},
				Networking:        kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
				NodeRegistration:  kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"},
				APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95", "1.2.3.L", "invalid,commas,in,DNS"},
			},
			expectedDNSNames:    []string{"valid-hostname", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local", "api.k8s.io"},
			expectedIPAddresses: []string{"10.96.0.1", "1.2.3.4", "10.1.245.94", "10.1.245.95"},
		},
		{
			name: "ControlPlaneEndpoint IP",
			cfg: &kubeadmapi.MasterConfiguration{
				API:               kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "4.5.6.7:6443"},
				Networking:        kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
				NodeRegistration:  kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"},
				APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95", "1.2.3.L", "invalid,commas,in,DNS"},
			},
			expectedDNSNames:    []string{"valid-hostname", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local"},
			expectedIPAddresses: []string{"10.96.0.1", "1.2.3.4", "10.1.245.94", "10.1.245.95", "4.5.6.7"},
		},
	}

	altNames, err := GetAPIServerAltNames(cfg)
	if err != nil {
		t.Fatalf("failed calling GetAPIServerAltNames: %v", err)
	}
	for _, rt := range tests {
		altNames, err := GetAPIServerAltNames(rt.cfg)
		if err != nil {
			t.Fatalf("failed calling GetAPIServerAltNames: %s: %v", rt.name, err)
		}

	expectedDNSNames := []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local", controlPlaneEndpoint}
	for _, DNSName := range expectedDNSNames {
		found := false
		for _, val := range altNames.DNSNames {
			if val == DNSName {
				found = true
				break
		for _, DNSName := range rt.expectedDNSNames {
			found := false
			for _, val := range altNames.DNSNames {
				if val == DNSName {
					found = true
					break
				}
			}

			if !found {
				t.Errorf("%s: altNames does not contain DNSName %s but %v", rt.name, DNSName, altNames.DNSNames)
			}
		}

		if !found {
			t.Errorf("altNames does not contain DNSName %s", DNSName)
		}
	}

	expectedIPAddresses := []string{"10.96.0.1", advertiseIP, "10.1.245.94", "10.1.245.95"}
	for _, IPAddress := range expectedIPAddresses {
		found := false
		for _, val := range altNames.IPs {
			if val.Equal(net.ParseIP(IPAddress)) {
				found = true
				break
		for _, IPAddress := range rt.expectedIPAddresses {
			found := false
			for _, val := range altNames.IPs {
				if val.Equal(net.ParseIP(IPAddress)) {
					found = true
					break
				}
			}
		}

		if !found {
			t.Errorf("altNames does not contain IPAddress %s", IPAddress)
			if !found {
				t.Errorf("%s: altNames does not contain IPAddress %s but %v", rt.name, IPAddress, altNames.IPs)
			}
		}
	}
}
@ -487,11 +508,13 @@ func TestGetEtcdAltNames(t *testing.T) {
	proxyIP := "10.10.10.100"
	cfg := &kubeadmapi.MasterConfiguration{
		Etcd: kubeadmapi.Etcd{
			ServerCertSANs: []string{
				proxy,
				proxyIP,
				"1.2.3.L",
				"invalid,commas,in,DNS",
			Local: &kubeadmapi.LocalEtcd{
				ServerCertSANs: []string{
					proxy,
					proxyIP,
					"1.2.3.L",
					"invalid,commas,in,DNS",
				},
			},
		},
	}
@ -516,7 +539,7 @@ func TestGetEtcdAltNames(t *testing.T) {
		}
	}

	expectedIPAddresses := []string{"127.0.0.1", proxyIP}
	expectedIPAddresses := []string{"127.0.0.1", net.IPv6loopback.String(), proxyIP}
	for _, IPAddress := range expectedIPAddresses {
		found := false
		for _, val := range altNames.IPs {
@ -538,14 +561,16 @@ func TestGetEtcdPeerAltNames(t *testing.T) {
	proxyIP := "10.10.10.100"
	advertiseIP := "1.2.3.4"
	cfg := &kubeadmapi.MasterConfiguration{
		API:      kubeadmapi.API{AdvertiseAddress: advertiseIP},
		NodeName: hostname,
		API:              kubeadmapi.API{AdvertiseAddress: advertiseIP},
		NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: hostname},
		Etcd: kubeadmapi.Etcd{
			PeerCertSANs: []string{
				proxy,
				proxyIP,
				"1.2.3.L",
				"invalid,commas,in,DNS",
			Local: &kubeadmapi.LocalEtcd{
				PeerCertSANs: []string{
					proxy,
					proxyIP,
					"1.2.3.L",
					"invalid,commas,in,DNS",
				},
			},
		},
	}
8
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/BUILD
generated
vendored
@ -19,10 +19,11 @@ go_test(
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/phases/certs:go_default_library",
        "//cmd/kubeadm/test:go_default_library",
        "//pkg/master/reconcilers:go_default_library",
        "//pkg/kubeapiserver/authorizer/modes:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ],
)

@ -35,7 +36,7 @@ go_library(
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane",
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/images:go_default_library",
@ -43,9 +44,8 @@ go_library(
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/staticpod:go_default_library",
        "//pkg/kubeapiserver/authorizer/modes:go_default_library",
        "//pkg/master/reconcilers:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ],
187
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go
generated
vendored
@ -24,10 +24,11 @@ import (
	"strconv"
	"strings"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
	kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/cmd/kubeadm/app/features"
	"k8s.io/kubernetes/cmd/kubeadm/app/images"
@ -35,36 +36,30 @@ import (
	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
	staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
	authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
	"k8s.io/kubernetes/pkg/master/reconcilers"
	utilpointer "k8s.io/kubernetes/pkg/util/pointer"
	"k8s.io/kubernetes/pkg/util/version"
)

// Static pod definitions in golang form are included below so that `kubeadm init` can get going.
const (
	DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"

	deprecatedV19AdmissionControl = "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	defaultV19AdmissionControl    = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
)

// CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane.
func CreateInitStaticPodManifestFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("[controlplane] creating static pod files")
	return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeScheduler)
}

// CreateAPIServerStaticPodManifestFile will write APIserver static pod manifest file.
func CreateAPIServerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating APIserver static pod files")
	return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeAPIServer)
}

// CreateControllerManagerStaticPodManifestFile will write controller manager static pod manifest file.
func CreateControllerManagerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating controller manager static pod files")
	return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeControllerManager)
}

// CreateSchedulerStaticPodManifestFile will write scheduler static pod manifest file.
func CreateSchedulerStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {
	glog.V(1).Infoln("creating scheduler static pod files")
	return createStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeScheduler)
}

@ -79,8 +74,8 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.
		kubeadmconstants.KubeAPIServer: staticpodutil.ComponentPod(v1.Container{
			Name:            kubeadmconstants.KubeAPIServer,
			Image:           images.GetCoreImage(kubeadmconstants.KubeAPIServer, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
			ImagePullPolicy: cfg.ImagePullPolicy,
			Command:         getAPIServerCommand(cfg, k8sVersion),
			ImagePullPolicy: v1.PullIfNotPresent,
			Command:         getAPIServerCommand(cfg),
			VolumeMounts:    staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
			LivenessProbe:   staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeAPIServer, int(cfg.API.BindPort), "/healthz", v1.URISchemeHTTPS),
			Resources:       staticpodutil.ComponentResources("250m"),
@ -89,7 +84,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.
		kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{
			Name:            kubeadmconstants.KubeControllerManager,
			Image:           images.GetCoreImage(kubeadmconstants.KubeControllerManager, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
			ImagePullPolicy: cfg.ImagePullPolicy,
			ImagePullPolicy: v1.PullIfNotPresent,
			Command:         getControllerManagerCommand(cfg, k8sVersion),
			VolumeMounts:    staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)),
			LivenessProbe:   staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeControllerManager, 10252, "/healthz", v1.URISchemeHTTP),
@ -99,7 +94,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.
		kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{
			Name:            kubeadmconstants.KubeScheduler,
			Image:           images.GetCoreImage(kubeadmconstants.KubeScheduler, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage),
			ImagePullPolicy: cfg.ImagePullPolicy,
			ImagePullPolicy: v1.PullIfNotPresent,
			Command:         getSchedulerCommand(cfg),
			VolumeMounts:    staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)),
			LivenessProbe:   staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeScheduler, 10251, "/healthz", v1.URISchemeHTTP),
@ -107,19 +102,6 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.
			Env:             getProxyEnvVars(),
		}, mounts.GetVolumes(kubeadmconstants.KubeScheduler)),
	}

	// Some cloud providers need extra privileges for example to load node information from a config drive
	// TODO: when we fully to external cloud providers and the api server and controller manager do not need
	// to call out to cloud provider code, we can remove the support for the PrivilegedPods
	if cfg.PrivilegedPods {
		staticPodSpecs[kubeadmconstants.KubeAPIServer].Spec.Containers[0].SecurityContext = &v1.SecurityContext{
			Privileged: utilpointer.BoolPtr(true),
		}
		staticPodSpecs[kubeadmconstants.KubeControllerManager].Spec.Containers[0].SecurityContext = &v1.SecurityContext{
			Privileged: utilpointer.BoolPtr(true),
		}
	}

	return staticPodSpecs
}

@ -132,6 +114,7 @@ func createStaticPodFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguratio
	}

	// gets the StaticPodSpecs, actualized for the current MasterConfiguration
	glog.V(1).Infoln("[controlplane] getting StaticPodSpecs")
	specs := GetStaticPodSpecs(cfg, k8sVersion)

	// creates required static pod specs
@ -147,18 +130,23 @@ func createStaticPodFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguratio
			return fmt.Errorf("failed to create static pod manifest file for %q: %v", componentName, err)
		}

		fmt.Printf("[controlplane] Wrote Static Pod manifest for component %s to %q\n", componentName, kubeadmconstants.GetStaticPodFilepath(componentName, manifestDir))
		fmt.Printf("[controlplane] wrote Static Pod manifest for component %s to %q\n", componentName, kubeadmconstants.GetStaticPodFilepath(componentName, manifestDir))
	}

	return nil
}

// getAPIServerCommand builds the right API server command from the given config object and version
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) []string {
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
	defaultArguments := map[string]string{
		"advertise-address": cfg.API.AdvertiseAddress,
		"insecure-port":     "0",
		"admission-control": defaultV19AdmissionControl,
|
||||
"advertise-address": cfg.API.AdvertiseAddress,
|
||||
"insecure-port": "0",
|
||||
"enable-admission-plugins": "NodeRestriction",
|
||||
// TODO: remove `PersistentVolumeLabel` in kubeadm v1.11, as it's automatically disabled in v1.11.
|
||||
// ref: https://github.com/kubernetes/kubernetes/pull/64326
|
||||
// we can't skip it now as we support v1.10 clusters still.
|
||||
// remove it from the unit tests too.
|
||||
"disable-admission-plugins": "PersistentVolumeLabel",
|
||||
"service-cluster-ip-range": cfg.Networking.ServiceSubnet,
|
||||
"service-account-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.ServiceAccountPublicKeyName),
|
||||
"client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
|
||||
@ -183,76 +171,69 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *versio
|
||||
|
||||
command := []string{"kube-apiserver"}
|
||||
|
||||
if cfg.CloudProvider == "aws" || cfg.CloudProvider == "gce" {
|
||||
defaultArguments["admission-control"] = deprecatedV19AdmissionControl
|
||||
}
|
||||
|
||||
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.APIServerExtraArgs)...)
|
||||
command = append(command, getAuthzParameters(cfg.AuthorizationModes)...)
|
||||
|
||||
// If the user set endpoints for an external etcd cluster
|
||||
if len(cfg.Etcd.Endpoints) > 0 {
|
||||
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
|
||||
if cfg.Etcd.External != nil {
|
||||
defaultArguments["etcd-servers"] = strings.Join(cfg.Etcd.External.Endpoints, ",")
|
||||
|
||||
// Use any user supplied etcd certificates
|
||||
if cfg.Etcd.CAFile != "" {
|
||||
command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
|
||||
if cfg.Etcd.External.CAFile != "" {
|
||||
defaultArguments["etcd-cafile"] = cfg.Etcd.External.CAFile
|
||||
}
|
||||
if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
|
||||
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
|
||||
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
|
||||
command = append(command, etcdClientFileArg, etcdKeyFileArg)
|
||||
if cfg.Etcd.External.CertFile != "" && cfg.Etcd.External.KeyFile != "" {
|
||||
defaultArguments["etcd-certfile"] = cfg.Etcd.External.CertFile
|
||||
defaultArguments["etcd-keyfile"] = cfg.Etcd.External.KeyFile
|
||||
}
|
||||
} else {
|
||||
// Default to etcd static pod on localhost
|
||||
etcdEndpointsArg := "--etcd-servers=https://127.0.0.1:2379"
|
||||
etcdCAFileArg := fmt.Sprintf("--etcd-cafile=%s", filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName))
|
||||
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientCertName))
|
||||
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientKeyName))
|
||||
command = append(command, etcdEndpointsArg, etcdCAFileArg, etcdClientFileArg, etcdKeyFileArg)
|
||||
|
||||
// Warn for unused user supplied variables
|
||||
if cfg.Etcd.CAFile != "" {
|
||||
fmt.Printf("[controlplane] WARNING: Configuration for %s CAFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.CAFile, kubeadmconstants.Etcd)
|
||||
}
|
||||
if cfg.Etcd.CertFile != "" {
|
||||
fmt.Printf("[controlplane] WARNING: Configuration for %s CertFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.CertFile, kubeadmconstants.Etcd)
|
||||
}
|
||||
if cfg.Etcd.KeyFile != "" {
|
||||
fmt.Printf("[controlplane] WARNING: Configuration for %s KeyFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.KeyFile, kubeadmconstants.Etcd)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.CloudProvider != "" {
|
||||
command = append(command, "--cloud-provider="+cfg.CloudProvider)
|
||||
|
||||
// Only append the --cloud-config option if there's a such file
|
||||
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
|
||||
command = append(command, "--cloud-config="+DefaultCloudConfigPath)
|
||||
}
|
||||
defaultArguments["etcd-servers"] = "https://127.0.0.1:2379"
|
||||
defaultArguments["etcd-cafile"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdCACertName)
|
||||
defaultArguments["etcd-certfile"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientCertName)
|
||||
defaultArguments["etcd-keyfile"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientKeyName)
|
||||
}
|
||||
|
||||
if features.Enabled(cfg.FeatureGates, features.HighAvailability) {
|
||||
command = append(command, "--endpoint-reconciler-type="+reconcilers.LeaseEndpointReconcilerType)
|
||||
defaultArguments["endpoint-reconciler-type"] = kubeadmconstants.LeaseEndpointReconcilerType
|
||||
}
|
||||
|
||||
if features.Enabled(cfg.FeatureGates, features.DynamicKubeletConfig) {
|
||||
command = append(command, "--feature-gates=DynamicKubeletConfig=true")
|
||||
defaultArguments["feature-gates"] = "DynamicKubeletConfig=true"
|
||||
}
|
||||
|
||||
if features.Enabled(cfg.FeatureGates, features.Auditing) {
|
||||
command = append(command, "--audit-policy-file="+kubeadmconstants.GetStaticPodAuditPolicyFile())
|
||||
command = append(command, "--audit-log-path="+filepath.Join(kubeadmconstants.StaticPodAuditPolicyLogDir, kubeadmconstants.AuditPolicyLogFile))
|
||||
defaultArguments["audit-policy-file"] = kubeadmconstants.GetStaticPodAuditPolicyFile()
|
||||
defaultArguments["audit-log-path"] = filepath.Join(kubeadmconstants.StaticPodAuditPolicyLogDir, kubeadmconstants.AuditPolicyLogFile)
|
||||
if cfg.AuditPolicyConfiguration.LogMaxAge == nil {
|
||||
command = append(command, fmt.Sprintf("--audit-log-maxage=%d", kubeadmapiext.DefaultAuditPolicyLogMaxAge))
|
||||
defaultArguments["audit-log-maxage"] = fmt.Sprintf("%d", kubeadmapiv1alpha2.DefaultAuditPolicyLogMaxAge)
|
||||
} else {
|
||||
command = append(command, fmt.Sprintf("--audit-log-maxage=%d", *cfg.AuditPolicyConfiguration.LogMaxAge))
|
||||
defaultArguments["audit-log-maxage"] = fmt.Sprintf("%d", *cfg.AuditPolicyConfiguration.LogMaxAge)
|
||||
}
|
||||
}
|
||||
if cfg.APIServerExtraArgs == nil {
|
||||
cfg.APIServerExtraArgs = map[string]string{}
|
||||
}
|
||||
cfg.APIServerExtraArgs["authorization-mode"] = getAuthzModes(cfg.APIServerExtraArgs["authorization-mode"])
|
||||
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.APIServerExtraArgs)...)
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
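After this refactor every flag funnels through kubeadmutil.BuildArgumentListFromMap, with the user-supplied APIServerExtraArgs winning over the defaults on key collisions. A minimal standalone sketch of that merge-then-render behavior; buildArgs and the sorted ordering here are illustrative assumptions, not the vendored helper:

package main

import (
    "fmt"
    "sort"
)

// buildArgs mirrors the documented semantics: defaults first, then
// user-supplied overrides win on key collisions.
func buildArgs(defaults, overrides map[string]string) []string {
    merged := map[string]string{}
    for k, v := range defaults {
        merged[k] = v
    }
    for k, v := range overrides {
        merged[k] = v // extra args take precedence over defaults
    }
    keys := make([]string, 0, len(merged))
    for k := range merged {
        keys = append(keys, k)
    }
    sort.Strings(keys) // deterministic flag ordering (an assumption here)
    args := make([]string, 0, len(keys))
    for _, k := range keys {
        args = append(args, fmt.Sprintf("--%s=%s", k, merged[k]))
    }
    return args
}

func main() {
    defaults := map[string]string{"insecure-port": "0", "enable-admission-plugins": "NodeRestriction"}
    extra := map[string]string{"insecure-port": "8080"} // stands in for APIServerExtraArgs
    fmt.Println(buildArgs(defaults, extra))
    // [--enable-admission-plugins=NodeRestriction --insecure-port=8080]
}
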
// getAuthzModes gets the authorization-related parameters to the api server
// Node,RBAC is always fixed at the beginning of the list, in this order
// AlwaysAllow and AlwaysDeny are ignored as they are only for testing
func getAuthzModes(authzModeExtraArgs string) string {
    modes := []string{
        authzmodes.ModeNode,
        authzmodes.ModeRBAC,
    }
    if strings.Contains(authzModeExtraArgs, authzmodes.ModeABAC) {
        modes = append(modes, authzmodes.ModeABAC)
    }
    if strings.Contains(authzModeExtraArgs, authzmodes.ModeWebhook) {
        modes = append(modes, authzmodes.ModeWebhook)
    }
    return strings.Join(modes, ",")
}

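getAuthzModes pins Node and RBAC at the front and only carries ABAC and Webhook over from the user's extra args. A self-contained sketch of that same contract, with string literals standing in for the authzmodes constants:

package main

import (
    "fmt"
    "strings"
)

// getAuthzModes reproduces the ordering contract above with plain strings.
func getAuthzModes(extra string) string {
    modes := []string{"Node", "RBAC"} // always first, in this order
    if strings.Contains(extra, "ABAC") {
        modes = append(modes, "ABAC")
    }
    if strings.Contains(extra, "Webhook") {
        modes = append(modes, "Webhook")
    }
    return strings.Join(modes, ",")
}

func main() {
    fmt.Println(getAuthzModes("Webhook,AlwaysAllow")) // Node,RBAC,Webhook (AlwaysAllow dropped)
    fmt.Println(getAuthzModes(""))                    // Node,RBAC
}
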
// calcNodeCidrSize determines the size of the subnets used on each node, based
// on the pod subnet provided. For IPv4, we assume that the pod subnet will
// be /16 and use /24. If the pod subnet cannot be parsed, the IPv4 value will
@ -270,9 +251,6 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *versio
// If the pod network size is /113 or larger, the node CIDR will be set to the same
// size and this will be rejected later in validation.
//
// NOTE: Currently, the pod network must be /66 or larger. It is not reflected here,
// but a smaller value will fail later validation.
//
// NOTE: Currently, the design allows a maximum of 64K nodes. This algorithm splits
// the available bits to maximize the number used for nodes, but still have the node
// CIDR be a multiple of eight.
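The body of calcNodeCidrSize is elided by this hunk. The sketch below reconstructs only the rules the comment documents (IPv4 assumes /16 and hands out /24; IPv6 keeps the node CIDR on an 8-bit boundary with at most 64K nodes); it is an approximation, not the vendored implementation:

package main

import (
    "fmt"
    "net"
    "strconv"
)

func calcNodeCidrSize(podSubnet string) string {
    maskSize := "24" // IPv4 assumption and parse-failure fallback
    if ip, podCidr, err := net.ParseCIDR(podSubnet); err == nil && ip.To4() == nil {
        var nodeCidrSize int
        podNetSize, totalBits := podCidr.Mask.Size()
        switch {
        case podNetSize == 112:
            nodeCidrSize = 120 // special case: 256 nodes x 256 pods per node
        case podNetSize < 112:
            // keep the node CIDR a multiple of eight while capping nodes at 64K
            nodeCidrSize = totalBits - ((totalBits-podNetSize-1)/8-1)*8
        default:
            nodeCidrSize = podNetSize // /113 or smaller; rejected later in validation
        }
        maskSize = strconv.Itoa(nodeCidrSize)
    }
    return maskSize
}

func main() {
    fmt.Println(calcNodeCidrSize("10.244.0.0/16")) // 24
    fmt.Println(calcNodeCidrSize("fd00::/64"))     // 80 (16 node bits = 64K nodes)
}
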
@ -321,25 +299,18 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion
        defaultArguments["cluster-signing-cert-file"] = ""
    }

    command := []string{"kube-controller-manager"}
    command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.ControllerManagerExtraArgs)...)

    if cfg.CloudProvider != "" {
        command = append(command, "--cloud-provider="+cfg.CloudProvider)

        // Only append the --cloud-config option if such a file exists
        if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
            command = append(command, "--cloud-config="+DefaultCloudConfigPath)
        }
    }

    // Let the controller-manager allocate Node CIDRs for the Pod network.
    // Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
    if cfg.Networking.PodSubnet != "" {
        maskSize := calcNodeCidrSize(cfg.Networking.PodSubnet)
        command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet,
            "--node-cidr-mask-size="+maskSize)
        defaultArguments["allocate-node-cidrs"] = "true"
        defaultArguments["cluster-cidr"] = cfg.Networking.PodSubnet
        defaultArguments["node-cidr-mask-size"] = maskSize
    }

    command := []string{"kube-controller-manager"}
    command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.ControllerManagerExtraArgs)...)

    return command
}

@ -374,25 +345,3 @@ func getProxyEnvVars() []v1.EnvVar {
        }
    }
    return envs
}

// getAuthzParameters gets the authorization-related parameters to the api server
// At this point, we can assume the list of authorization modes is valid (because it has been validated in the API machinery code already)
// If the list is empty, it's defaulted (mostly for unit testing)
func getAuthzParameters(modes []string) []string {
    command := []string{}
    strset := sets.NewString(modes...)

    if len(modes) == 0 {
        return []string{fmt.Sprintf("--authorization-mode=%s", kubeadmapiext.DefaultAuthorizationModes)}
    }

    if strset.Has(authzmodes.ModeABAC) {
        command = append(command, "--authorization-policy-file="+kubeadmconstants.AuthorizationPolicyPath)
    }
    if strset.Has(authzmodes.ModeWebhook) {
        command = append(command, "--authorization-webhook-config-file="+kubeadmconstants.AuthorizationWebhookConfigPath)
    }

    command = append(command, "--authorization-mode="+strings.Join(modes, ","))
    return command
}
727
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests_test.go
generated
vendored
File diff suppressed because it is too large
62
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/volumes.go
generated
vendored
@ -33,16 +33,14 @@ import (
const (
    caCertsVolumeName       = "ca-certs"
    caCertsVolumePath       = "/etc/ssl/certs"
    caCertsPkiVolumeName    = "ca-certs-etc-pki"
    flexvolumeDirVolumeName = "flexvolume-dir"
    cloudConfigVolumeName   = "cloud-config"
    flexvolumeDirVolumePath = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec"
)

// caCertsPkiVolumePath specifies the path that can be conditionally mounted into the apiserver and controller-manager containers
// as /etc/ssl/certs might be a symlink to it. It's a variable since it may be changed in unit testing. This var MUST NOT be changed
// in normal codepaths during runtime.
var caCertsPkiVolumePath = "/etc/pki"
// caCertsExtraVolumePaths specifies the paths that can be conditionally mounted into the apiserver and controller-manager containers
// as /etc/ssl/certs might be or contain a symlink to them. It's a variable since it may be changed in unit testing. This var MUST
// NOT be changed in normal codepaths during runtime.
var caCertsExtraVolumePaths = []string{"/etc/pki", "/usr/share/ca-certificates", "/usr/local/share/ca-certificates", "/etc/ca-certificates"}

// getHostPathVolumesForTheControlPlane gets the required hostPath volumes and mounts for the control plane
func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) controlPlaneHostPathMounts {
@ -64,8 +62,8 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c
        mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeAuditPolicyLogVolumeName, cfg.AuditPolicyConfiguration.LogDir, kubeadmconstants.StaticPodAuditPolicyLogDir, false, &hostPathDirectoryOrCreate)
    }
    // If external etcd is specified, mount the directories needed for accessing the CA/serving certs and the private key
    if len(cfg.Etcd.Endpoints) != 0 {
        etcdVols, etcdVolMounts := getEtcdCertVolumes(cfg.Etcd, cfg.CertificatesDir)
    if cfg.Etcd.External != nil {
        etcdVols, etcdVolMounts := getEtcdCertVolumes(cfg.Etcd.External, cfg.CertificatesDir)
        mounts.AddHostPathMounts(kubeadmconstants.KubeAPIServer, etcdVols, etcdVolMounts)
    }

@ -78,13 +76,6 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c
    // Read-only mount for the controller manager kubeconfig file
    controllerManagerKubeConfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName)
    mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeConfigVolumeName, controllerManagerKubeConfigFile, controllerManagerKubeConfigFile, true, &hostPathFileOrCreate)
    // Read-only mount of the cloud config file if present
    if cfg.CloudProvider != "" {
        if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
            mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, cloudConfigVolumeName, DefaultCloudConfigPath, DefaultCloudConfigPath, true, &hostPathFileOrCreate)
            mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, cloudConfigVolumeName, DefaultCloudConfigPath, DefaultCloudConfigPath, true, &hostPathFileOrCreate)
        }
    }
    // Mount for the flexvolume directory (/usr/libexec/kubernetes/kubelet-plugins/volume/exec)
    // Flexvolume dir must NOT be readonly as it is used for third-party plugins to integrate with their storage backends via unix domain socket.
    if stat, err := os.Stat(flexvolumeDirVolumePath); err == nil && stat.IsDir() {
@ -96,18 +87,21 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c
    schedulerKubeConfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.SchedulerKubeConfigFileName)
    mounts.NewHostPathMount(kubeadmconstants.KubeScheduler, kubeadmconstants.KubeConfigVolumeName, schedulerKubeConfigFile, schedulerKubeConfigFile, true, &hostPathFileOrCreate)

    // On some systems where we host-mount /etc/ssl/certs, it is also required to mount /etc/pki. This is needed
    // due to symlinks pointing from files in /etc/ssl/certs into /etc/pki/
    if isPkiVolumeMountNeeded() {
        mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, caCertsPkiVolumeName, caCertsPkiVolumePath, caCertsPkiVolumePath, true, &hostPathDirectoryOrCreate)
        mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, caCertsPkiVolumeName, caCertsPkiVolumePath, caCertsPkiVolumePath, true, &hostPathDirectoryOrCreate)
    // On some systems where we host-mount /etc/ssl/certs, it is also required to mount additional directories.
    // This is needed due to symlinks pointing from files in /etc/ssl/certs to these directories.
    for _, caCertsExtraVolumePath := range caCertsExtraVolumePaths {
        if isExtraVolumeMountNeeded(caCertsExtraVolumePath) {
            caCertsExtraVolumeName := strings.Replace(caCertsExtraVolumePath, "/", "-", -1)[1:]
            mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, caCertsExtraVolumeName, caCertsExtraVolumePath, caCertsExtraVolumePath, true, &hostPathDirectoryOrCreate)
            mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, caCertsExtraVolumeName, caCertsExtraVolumePath, caCertsExtraVolumePath, true, &hostPathDirectoryOrCreate)
        }
    }

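The loop above derives each extra CA volume's name from its path by swapping "/" for "-" and dropping the leading dash. A tiny self-contained check of that transform:

package main

import (
    "fmt"
    "strings"
)

func main() {
    for _, p := range []string{"/etc/pki", "/usr/share/ca-certificates"} {
        // same expression as the loop above: replace slashes, trim leading dash
        fmt.Println(strings.Replace(p, "/", "-", -1)[1:])
    }
    // Output:
    // etc-pki
    // usr-share-ca-certificates
}
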
    // Merge user defined mounts and ensure unique volume and volume mount
    // names
    mounts.AddExtraHostPathMounts(kubeadmconstants.KubeAPIServer, cfg.APIServerExtraVolumes, true, &hostPathDirectoryOrCreate)
    mounts.AddExtraHostPathMounts(kubeadmconstants.KubeControllerManager, cfg.ControllerManagerExtraVolumes, true, &hostPathDirectoryOrCreate)
    mounts.AddExtraHostPathMounts(kubeadmconstants.KubeScheduler, cfg.SchedulerExtraVolumes, true, &hostPathDirectoryOrCreate)
    mounts.AddExtraHostPathMounts(kubeadmconstants.KubeAPIServer, cfg.APIServerExtraVolumes)
    mounts.AddExtraHostPathMounts(kubeadmconstants.KubeControllerManager, cfg.ControllerManagerExtraVolumes)
    mounts.AddExtraHostPathMounts(kubeadmconstants.KubeScheduler, cfg.SchedulerExtraVolumes)

    return mounts
}
@ -153,10 +147,11 @@ func (c *controlPlaneHostPathMounts) AddHostPathMounts(component string, vols []

// AddExtraHostPathMounts adds host path mounts and overwrites the default
// paths in the case that a user specifies the same volume/volume mount name.
func (c *controlPlaneHostPathMounts) AddExtraHostPathMounts(component string, extraVols []kubeadmapi.HostPathMount, readOnly bool, hostPathType *v1.HostPathType) {
func (c *controlPlaneHostPathMounts) AddExtraHostPathMounts(component string, extraVols []kubeadmapi.HostPathMount) {
    for _, extraVol := range extraVols {
        fmt.Printf("[controlplane] Adding extra host path mount %q to %q\n", extraVol.Name, component)
        c.NewHostPathMount(component, extraVol.Name, extraVol.HostPath, extraVol.MountPath, readOnly, hostPathType)
        hostPathType := extraVol.PathType
        c.NewHostPathMount(component, extraVol.Name, extraVol.HostPath, extraVol.MountPath, !extraVol.Writable, &hostPathType)
    }
}

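The reworked AddExtraHostPathMounts no longer takes shared readOnly/hostPathType parameters; each HostPathMount now carries its own PathType, and its Writable flag is inverted into the mount's ReadOnly field. A small illustration of that mapping, where the struct and field values are invented stand-ins for kubeadmapi.HostPathMount:

package main

import "fmt"

// hostPathMount is a stand-in for kubeadmapi.HostPathMount.
type hostPathMount struct {
    Name, HostPath, MountPath string
    Writable                  bool
    PathType                  string // stands in for v1.HostPathType
}

func main() {
    m := hostPathMount{
        Name:      "extra-config",            // hypothetical extra volume
        HostPath:  "/etc/kubernetes/extra",
        MountPath: "/etc/kubernetes/extra",
        Writable:  false,
        PathType:  "DirectoryOrCreate",
    }
    readOnly := !m.Writable // the inversion performed per mount above
    fmt.Printf("mount %s at %s (readOnly=%t, type=%s)\n",
        m.Name, m.MountPath, readOnly, m.PathType)
}
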
@ -183,7 +178,7 @@ func (c *controlPlaneHostPathMounts) addComponentVolumeMount(component string, v
}

// getEtcdCertVolumes returns the volumes/volumemounts needed for talking to an external etcd cluster
func getEtcdCertVolumes(etcdCfg kubeadmapi.Etcd, k8sCertificatesDir string) ([]v1.Volume, []v1.VolumeMount) {
func getEtcdCertVolumes(etcdCfg *kubeadmapi.ExternalEtcd, k8sCertificatesDir string) ([]v1.Volume, []v1.VolumeMount) {
    certPaths := []string{etcdCfg.CAFile, etcdCfg.CertFile, etcdCfg.KeyFile}
    certDirs := sets.NewString()
    for _, certPath := range certPaths {
@ -191,7 +186,14 @@ func getEtcdCertVolumes(etcdCfg kubeadmapi.Etcd, k8sCertificatesDir string) ([]v
        // Ignore ".", which is the result of passing an empty path.
        // Also ignore the cert directories that already may be mounted; /etc/ssl/certs, /etc/pki or Kubernetes CertificatesDir
        // If the etcd certs are in there, it's okay, we don't have to do anything
        if certDir == "." || strings.HasPrefix(certDir, caCertsVolumePath) || strings.HasPrefix(certDir, caCertsPkiVolumePath) || strings.HasPrefix(certDir, k8sCertificatesDir) {
        extraVolumePath := false
        for _, caCertsExtraVolumePath := range caCertsExtraVolumePaths {
            if strings.HasPrefix(certDir, caCertsExtraVolumePath) {
                extraVolumePath = true
                break
            }
        }
        if certDir == "." || extraVolumePath || strings.HasPrefix(certDir, caCertsVolumePath) || strings.HasPrefix(certDir, k8sCertificatesDir) {
            continue
        }
        // Filter out any existing hostpath mounts in the list that contains a subset of the path
@ -222,11 +224,11 @@ func getEtcdCertVolumes(etcdCfg kubeadmapi.Etcd, k8sCertificatesDir string) ([]v
    return volumes, volumeMounts
}

// isPkiVolumeMountNeeded specifies whether /etc/pki should be host-mounted into the containers
// isExtraVolumeMountNeeded specifies whether the given directory should be host-mounted into the containers
// On some systems where we host-mount /etc/ssl/certs, it is also required to mount /etc/pki. This is needed
// due to symlinks pointing from files in /etc/ssl/certs into /etc/pki/
func isPkiVolumeMountNeeded() bool {
    if _, err := os.Stat(caCertsPkiVolumePath); err == nil {
func isExtraVolumeMountNeeded(caCertsExtraVolumePath string) bool {
    if _, err := os.Stat(caCertsExtraVolumePath); err == nil {
        return true
    }
    return false
99
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/volumes_test.go
generated
vendored
@ -234,7 +234,7 @@ func TestGetEtcdCertVolumes(t *testing.T) {
    }

    for _, rt := range tests {
        actualVol, actualVolMount := getEtcdCertVolumes(kubeadmapi.Etcd{
        actualVol, actualVolMount := getEtcdCertVolumes(&kubeadmapi.ExternalEtcd{
            CAFile:   rt.ca,
            CertFile: rt.cert,
            KeyFile:  rt.key,
@ -525,10 +525,12 @@ func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
            cfg: &kubeadmapi.MasterConfiguration{
                CertificatesDir: testCertsDir,
                Etcd: kubeadmapi.Etcd{
                    Endpoints: []string{"foo"},
                    CAFile:    "/etc/certs/etcd/my-etcd-ca.crt",
                    CertFile:  testCertsDir + "/etcd/my-etcd.crt",
                    KeyFile:   "/var/lib/etcd/certs/my-etcd.key",
                    External: &kubeadmapi.ExternalEtcd{
                        Endpoints: []string{"foo"},
                        CAFile:    "/etc/certs/etcd/my-etcd-ca.crt",
                        CertFile:  testCertsDir + "/etcd/my-etcd.crt",
                        KeyFile:   "/var/lib/etcd/certs/my-etcd.key",
                    },
                },
            },
            vol: volMap2,
@ -542,9 +544,9 @@
    }
    defer os.RemoveAll(tmpdir)

    // set up tmp caCertsPkiVolumePath for testing
    caCertsPkiVolumePath = fmt.Sprintf("%s/etc/pki", tmpdir)
    defer func() { caCertsPkiVolumePath = "/etc/pki" }()
    // set up tmp caCertsExtraVolumePaths for testing
    caCertsExtraVolumePaths = []string{fmt.Sprintf("%s/etc/pki", tmpdir), fmt.Sprintf("%s/usr/share/ca-certificates", tmpdir)}
    defer func() { caCertsExtraVolumePaths = []string{"/etc/pki", "/usr/share/ca-certificates"} }()

    for _, rt := range tests {
        mounts := getHostPathVolumesForTheControlPlane(rt.cfg)
@ -556,9 +558,6 @@
        if _, ok := mounts.volumeMounts[kubeadmconstants.KubeControllerManager][flexvolumeDirVolumeName]; ok {
            delete(mounts.volumeMounts[kubeadmconstants.KubeControllerManager], flexvolumeDirVolumeName)
        }
        if _, ok := mounts.volumeMounts[kubeadmconstants.KubeControllerManager][cloudConfigVolumeName]; ok {
            delete(mounts.volumeMounts[kubeadmconstants.KubeControllerManager], cloudConfigVolumeName)
        }
        if !reflect.DeepEqual(mounts.volumes, rt.vol) {
            t.Errorf(
                "failed getHostPathVolumesForTheControlPlane:\n\texpected: %v\n\t actual: %v",
@ -615,30 +614,62 @@ func TestAddExtraHostPathMounts(t *testing.T) {
    mounts.AddHostPathMounts("component", vols, volMounts)
    hostPathMounts := []kubeadmapi.HostPathMount{
        {
            Name:      "foo",
            HostPath:  "/tmp/qux",
            MountPath: "/tmp/qux",
            Name:      "foo-0",
            HostPath:  "/tmp/qux-0",
            MountPath: "/tmp/qux-0",
            Writable:  false,
            PathType:  v1.HostPathFile,
        },
        {
            Name:      "bar-0",
            HostPath:  "/tmp/asd-0",
            MountPath: "/tmp/asd-0",
            Writable:  true,
            PathType:  v1.HostPathDirectory,
        },
        {
            Name:      "foo-1",
            HostPath:  "/tmp/qux-1",
            MountPath: "/tmp/qux-1",
            Writable:  false,
            PathType:  v1.HostPathFileOrCreate,
        },
        {
            Name:      "bar-1",
            HostPath:  "/tmp/asd-1",
            MountPath: "/tmp/asd-1",
            Writable:  true,
            PathType:  v1.HostPathDirectoryOrCreate,
        },
    }
    mounts.AddExtraHostPathMounts("component", hostPathMounts, true, &hostPathDirectoryOrCreate)
    if _, ok := mounts.volumes["component"]["foo"]; !ok {
        t.Errorf("Expected to find volume %q", "foo")
    }
    vol, _ := mounts.volumes["component"]["foo"]
    if vol.Name != "foo" {
        t.Errorf("Expected volume name %q", "foo")
    }
    if vol.HostPath.Path != "/tmp/qux" {
        t.Errorf("Expected host path %q", "/tmp/qux")
    }
    if _, ok := mounts.volumeMounts["component"]["foo"]; !ok {
        t.Errorf("Expected to find volume mount %q", "foo")
    }
    volMount, _ := mounts.volumeMounts["component"]["foo"]
    if volMount.Name != "foo" {
        t.Errorf("Expected volume mount name %q", "foo")
    }
    if volMount.MountPath != "/tmp/qux" {
        t.Errorf("Expected container path %q", "/tmp/qux")
    mounts.AddExtraHostPathMounts("component", hostPathMounts)
    for _, hostMount := range hostPathMounts {
        volumeName := hostMount.Name
        if _, ok := mounts.volumes["component"][volumeName]; !ok {
            t.Errorf("Expected to find volume %q", volumeName)
        }
        vol := mounts.volumes["component"][volumeName]
        if vol.Name != volumeName {
            t.Errorf("Expected volume name %q", volumeName)
        }
        if vol.HostPath.Path != hostMount.HostPath {
            t.Errorf("Expected host path %q", hostMount.HostPath)
        }
        if _, ok := mounts.volumeMounts["component"][volumeName]; !ok {
            t.Errorf("Expected to find volume mount %q", volumeName)
        }
        if *vol.HostPath.Type != v1.HostPathType(hostMount.PathType) {
            t.Errorf("Expected host path type %q", hostMount.PathType)
        }
        volMount, _ := mounts.volumeMounts["component"][volumeName]
        if volMount.Name != volumeName {
            t.Errorf("Expected volume mount name %q", volumeName)
        }
        if volMount.MountPath != hostMount.MountPath {
            t.Errorf("Expected container path %q", hostMount.MountPath)
        }
        if volMount.ReadOnly != !hostMount.Writable {
            t.Errorf("Expected volume writable setting %t", hostMount.Writable)
        }
    }
}
6
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/BUILD
generated
vendored
@ -27,6 +27,7 @@ go_library(
        "//cmd/kubeadm/app/images:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/staticpod:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
    ],
)
@ -40,9 +41,6 @@

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cmd/kubeadm/app/phases/etcd/spec:all-srcs",
    ],
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
52
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/local.go
generated
vendored
@ -20,6 +20,8 @@ import (
    "fmt"
    "path/filepath"

    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -30,12 +32,12 @@ import (

const (
    etcdVolumeName  = "etcd-data"
    certsVolumeName = "k8s-certs"
    certsVolumeName = "etcd-certs"
)

// CreateLocalEtcdStaticPodManifestFile will write local etcd static pod manifest file.
func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error {

    glog.V(1).Infoln("creating local etcd static pod manifest file")
    // gets etcd StaticPodSpec, actualized for the current MasterConfiguration
    spec := GetEtcdPodSpec(cfg)
    // writes etcd StaticPod to disk
@ -52,40 +54,48 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.Ma
func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod {
    pathType := v1.HostPathDirectoryOrCreate
    etcdMounts := map[string]v1.Volume{
        etcdVolumeName:  staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.DataDir, &pathType),
        certsVolumeName: staticpodutil.NewVolume(certsVolumeName, cfg.CertificatesDir, &pathType),
        etcdVolumeName:  staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.Local.DataDir, &pathType),
        certsVolumeName: staticpodutil.NewVolume(certsVolumeName, cfg.CertificatesDir+"/etcd", &pathType),
    }
    return staticpodutil.ComponentPod(v1.Container{
        Name:            kubeadmconstants.Etcd,
        Command:         getEtcdCommand(cfg),
        Image:           images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image),
        ImagePullPolicy: cfg.ImagePullPolicy,
        Image:           images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Local.Image),
        ImagePullPolicy: v1.PullIfNotPresent,
        // Mount the etcd datadir path read-write so etcd can store data in a more persistent manner
        VolumeMounts: []v1.VolumeMount{
            staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.DataDir, false),
            staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir, false),
            staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.Local.DataDir, false),
            staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir+"/etcd", false),
        },
        LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.Etcd, 2379, "/health", v1.URISchemeHTTP),
        LivenessProbe: staticpodutil.EtcdProbe(
            cfg, kubeadmconstants.Etcd, 2379, cfg.CertificatesDir,
            kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdHealthcheckClientCertName, kubeadmconstants.EtcdHealthcheckClientKeyName,
        ),
    }, etcdMounts)
}

// getEtcdCommand builds the right etcd command from the given config object
func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string {
    defaultArguments := map[string]string{
        "listen-client-urls":    "https://127.0.0.1:2379",
        "advertise-client-urls": "https://127.0.0.1:2379",
        "data-dir":              cfg.Etcd.DataDir,
        "cert-file":             filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerCertName),
        "key-file":              filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName),
        "trusted-ca-file":       filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
        "client-cert-auth":      "true",
        "peer-cert-file":        filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdPeerCertName),
        "peer-key-file":         filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdPeerKeyName),
        "peer-trusted-ca-file":  filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName),
        "peer-client-cert-auth": "true",
        "name":                        cfg.GetNodeName(),
        "listen-client-urls":          "https://127.0.0.1:2379",
        "advertise-client-urls":       "https://127.0.0.1:2379",
        "listen-peer-urls":            "https://127.0.0.1:2380",
        "initial-advertise-peer-urls": "https://127.0.0.1:2380",
        "data-dir":                    cfg.Etcd.Local.DataDir,
        "cert-file":                   filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerCertName),
        "key-file":                    filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName),
        "trusted-ca-file":             filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdCACertName),
        "client-cert-auth":            "true",
        "peer-cert-file":              filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdPeerCertName),
        "peer-key-file":               filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdPeerKeyName),
        "peer-trusted-ca-file":        filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdCACertName),
        "peer-client-cert-auth":       "true",
        "snapshot-count":              "10000",
        "initial-cluster":             fmt.Sprintf("%s=https://127.0.0.1:2380", cfg.GetNodeName()),
    }

    command := []string{"etcd"}
    command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.Etcd.ExtraArgs)...)
    command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.Etcd.Local.ExtraArgs)...)
    return command
}
112
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/local_test.go
generated
vendored
@ -34,6 +34,12 @@ func TestGetEtcdPodSpec(t *testing.T) {
    // Creates a Master Configuration
    cfg := &kubeadmapi.MasterConfiguration{
        KubernetesVersion: "v1.7.0",
        Etcd: kubeadmapi.Etcd{
            Local: &kubeadmapi.LocalEtcd{
                DataDir: "/var/lib/etcd",
                Image:   "",
            },
        },
    }

    // Executes GetEtcdPodSpec
@ -54,6 +60,12 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) {
    // Creates a Master Configuration
    cfg := &kubeadmapi.MasterConfiguration{
        KubernetesVersion: "v1.7.0",
        Etcd: kubeadmapi.Etcd{
            Local: &kubeadmapi.LocalEtcd{
                DataDir: "/var/lib/etcd",
                Image:   "k8s.gcr.io/etcd",
            },
        },
    }

    // Execute createStaticPodFunction
@ -75,65 +87,99 @@ func TestGetEtcdCommand(t *testing.T) {
    }{
        {
            cfg: &kubeadmapi.MasterConfiguration{
                Etcd: kubeadmapi.Etcd{DataDir: "/var/lib/etcd"},
            },
            expected: []string{
                "etcd",
                "--listen-client-urls=https://127.0.0.1:2379",
                "--advertise-client-urls=https://127.0.0.1:2379",
                "--data-dir=/var/lib/etcd",
                "--cert-file=" + kubeadmconstants.EtcdServerCertName,
                "--key-file=" + kubeadmconstants.EtcdServerKeyName,
                "--trusted-ca-file=" + kubeadmconstants.CACertName,
                "--client-cert-auth=true",
                "--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
                "--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
                "--peer-trusted-ca-file=" + kubeadmconstants.CACertName,
                "--peer-client-cert-auth=true",
            },
        },
        {
            cfg: &kubeadmapi.MasterConfiguration{
                NodeRegistration: kubeadmapi.NodeRegistrationOptions{
                    Name: "foo",
                },
                Etcd: kubeadmapi.Etcd{
                    DataDir: "/var/lib/etcd",
                    ExtraArgs: map[string]string{
                        "listen-client-urls":    "https://10.0.1.10:2379",
                        "advertise-client-urls": "https://10.0.1.10:2379",
                    Local: &kubeadmapi.LocalEtcd{
                        DataDir: "/var/lib/etcd",
                    },
                },
            },
            expected: []string{
                "etcd",
                "--listen-client-urls=https://10.0.1.10:2379",
                "--advertise-client-urls=https://10.0.1.10:2379",
                "--name=foo",
                "--listen-client-urls=https://127.0.0.1:2379",
                "--advertise-client-urls=https://127.0.0.1:2379",
                "--listen-peer-urls=https://127.0.0.1:2380",
                "--initial-advertise-peer-urls=https://127.0.0.1:2380",
                "--data-dir=/var/lib/etcd",
                "--cert-file=" + kubeadmconstants.EtcdServerCertName,
                "--key-file=" + kubeadmconstants.EtcdServerKeyName,
                "--trusted-ca-file=" + kubeadmconstants.CACertName,
                "--trusted-ca-file=" + kubeadmconstants.EtcdCACertName,
                "--client-cert-auth=true",
                "--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
                "--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
                "--peer-trusted-ca-file=" + kubeadmconstants.CACertName,
                "--peer-trusted-ca-file=" + kubeadmconstants.EtcdCACertName,
                "--snapshot-count=10000",
                "--peer-client-cert-auth=true",
                "--initial-cluster=foo=https://127.0.0.1:2380",
            },
        },
        {
            cfg: &kubeadmapi.MasterConfiguration{
                Etcd: kubeadmapi.Etcd{DataDir: "/etc/foo"},
                NodeRegistration: kubeadmapi.NodeRegistrationOptions{
                    Name: "bar",
                },
                Etcd: kubeadmapi.Etcd{
                    Local: &kubeadmapi.LocalEtcd{
                        DataDir: "/var/lib/etcd",
                        ExtraArgs: map[string]string{
                            "listen-client-urls":    "https://10.0.1.10:2379",
                            "advertise-client-urls": "https://10.0.1.10:2379",
                        },
                    },
                },
            },
            expected: []string{
                "etcd",
                "--listen-client-urls=https://127.0.0.1:2379",
                "--advertise-client-urls=https://127.0.0.1:2379",
                "--data-dir=/etc/foo",
                "--name=bar",
                "--listen-client-urls=https://10.0.1.10:2379",
                "--advertise-client-urls=https://10.0.1.10:2379",
                "--listen-peer-urls=https://127.0.0.1:2380",
                "--initial-advertise-peer-urls=https://127.0.0.1:2380",
                "--data-dir=/var/lib/etcd",
                "--cert-file=" + kubeadmconstants.EtcdServerCertName,
                "--key-file=" + kubeadmconstants.EtcdServerKeyName,
                "--trusted-ca-file=" + kubeadmconstants.CACertName,
                "--trusted-ca-file=" + kubeadmconstants.EtcdCACertName,
                "--client-cert-auth=true",
                "--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
                "--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
                "--peer-trusted-ca-file=" + kubeadmconstants.CACertName,
                "--peer-trusted-ca-file=" + kubeadmconstants.EtcdCACertName,
                "--snapshot-count=10000",
                "--peer-client-cert-auth=true",
                "--initial-cluster=bar=https://127.0.0.1:2380",
            },
        },
        {
            cfg: &kubeadmapi.MasterConfiguration{
                NodeRegistration: kubeadmapi.NodeRegistrationOptions{
                    Name: "wombat",
                },
                Etcd: kubeadmapi.Etcd{
                    Local: &kubeadmapi.LocalEtcd{
                        DataDir: "/etc/foo",
                    },
                },
            },
            expected: []string{
                "etcd",
                "--name=wombat",
                "--listen-client-urls=https://127.0.0.1:2379",
                "--advertise-client-urls=https://127.0.0.1:2379",
                "--listen-peer-urls=https://127.0.0.1:2380",
                "--initial-advertise-peer-urls=https://127.0.0.1:2380",
                "--data-dir=/etc/foo",
                "--cert-file=" + kubeadmconstants.EtcdServerCertName,
                "--key-file=" + kubeadmconstants.EtcdServerKeyName,
                "--trusted-ca-file=" + kubeadmconstants.EtcdCACertName,
                "--client-cert-auth=true",
                "--peer-cert-file=" + kubeadmconstants.EtcdPeerCertName,
                "--peer-key-file=" + kubeadmconstants.EtcdPeerKeyName,
                "--peer-trusted-ca-file=" + kubeadmconstants.EtcdCACertName,
                "--snapshot-count=10000",
                "--peer-client-cert-auth=true",
                "--initial-cluster=wombat=https://127.0.0.1:2380",
            },
        },
    }
19
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/spec/doc.go
generated
vendored
@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

package spec
205
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/spec/spec.go
generated
vendored
@ -1,205 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was collated from types used in:
// https://github.com/coreos/etcd-operator/tree/e7f18696bbdc127fa028a99ca8166a8519749328/pkg/apis/etcd/v1beta2.
// When kubeadm moves to its own repo and controls its own dependencies,
// this file will no longer be needed.

package spec

import (
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const (
    // CRDResourceKind is the CRD resource kind
    CRDResourceKind = "EtcdCluster"
    // CRDResourcePlural is the CRD resource plural
    CRDResourcePlural = "etcdclusters"
    groupName         = "etcd.database.coreos.com"
)

var (
    // SchemeBuilder is a scheme builder
    SchemeBuilder = runtime.NewSchemeBuilder(AddKnownTypes)
    // AddToScheme adds to the scheme
    AddToScheme = SchemeBuilder.AddToScheme
    // SchemeGroupVersion is the scheme version
    SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: "v1beta2"}
    // CRDName is the name of the CRD
    CRDName = CRDResourcePlural + "." + groupName
)

// Resource gets an EtcdCluster GroupResource for a specified resource
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}

// AddKnownTypes adds the set of types defined in this package to the supplied scheme.
func AddKnownTypes(s *runtime.Scheme) error {
    s.AddKnownTypes(SchemeGroupVersion,
        &EtcdCluster{},
        &EtcdClusterList{},
    )
    metav1.AddToGroupVersion(s, SchemeGroupVersion)
    return nil
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EtcdClusterList is a list of etcd clusters.
type EtcdClusterList struct {
    metav1.TypeMeta `json:",inline"`
    // Standard list metadata
    // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []EtcdCluster `json:"items"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EtcdCluster represents an etcd cluster
type EtcdCluster struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
    Spec              ClusterSpec `json:"spec"`
}

// ClusterSpec represents a cluster spec
type ClusterSpec struct {
    // Size is the expected size of the etcd cluster.
    // The etcd-operator will eventually make the size of the running
    // cluster equal to the expected size.
    // The valid range of the size is from 1 to 7.
    Size int `json:"size"`

    // BaseImage is the base etcd image name that will be used to launch
    // etcd clusters. This is useful for private registries, etc.
    //
    // If image is not set, default is quay.io/coreos/etcd
    BaseImage string `json:"baseImage"`

    // Version is the expected version of the etcd cluster.
    // The etcd-operator will eventually make the etcd cluster version
    // equal to the expected version.
    //
    // The version must follow the [semver](http://semver.org) format, for example "3.1.8".
    // Only etcd released versions are supported: https://github.com/coreos/etcd/releases
    //
    // If version is not set, default is "3.1.8".
    Version string `json:"version,omitempty"`

    // Paused is to pause the control of the operator for the etcd cluster.
    Paused bool `json:"paused,omitempty"`

    // Pod defines the policy to create pods for the etcd pod.
    //
    // Updating Pod does not take effect on any existing etcd pods.
    Pod *PodPolicy `json:"pod,omitempty"`

    // SelfHosted determines if the etcd cluster is used for a self-hosted
    // Kubernetes cluster.
    //
    // SelfHosted is a cluster initialization configuration. It cannot be updated.
    SelfHosted *SelfHostedPolicy `json:"selfHosted,omitempty"`

    // etcd cluster TLS configuration
    TLS *TLSPolicy `json:"TLS,omitempty"`
}

// PodPolicy defines the policy to create pods for the etcd container.
type PodPolicy struct {
    // Labels specifies the labels to attach to pods the operator creates for the
    // etcd cluster.
    // "app" and "etcd_*" labels are reserved for the internal use of the etcd operator.
    // Do not overwrite them.
    Labels map[string]string `json:"labels,omitempty"`

    // NodeSelector specifies a map of key-value pairs. For the pod to be eligible
    // to run on a node, the node must have each of the indicated key-value pairs as
    // labels.
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`

    // AntiAffinity determines if the etcd-operator tries to avoid putting
    // the etcd members in the same cluster onto the same node.
    AntiAffinity bool `json:"antiAffinity,omitempty"`

    // Resources is the resource requirements for the etcd container.
    // This field cannot be updated once the cluster is created.
    Resources v1.ResourceRequirements `json:"resources,omitempty"`

    // Tolerations specifies the pod's tolerations.
    Tolerations []v1.Toleration `json:"tolerations,omitempty"`

    // List of environment variables to set in the etcd container.
    // This is used to configure the etcd process. The etcd cluster cannot be
    // created when bad environment variables are provided. Do not overwrite any
    // flags used to bootstrap the cluster (for example the `--initial-cluster` flag).
    // This field cannot be updated.
    EtcdEnv []v1.EnvVar `json:"etcdEnv,omitempty"`

    // By default, kubernetes will mount a service account token into the etcd pods.
    // AutomountServiceAccountToken indicates whether pods running with the service account should have an API token automatically mounted.
    AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
}

// TLSPolicy defines the TLS policy of an etcd cluster
type TLSPolicy struct {
    // StaticTLS enables the user to generate static x509 certificates and keys,
    // put them into Kubernetes secrets, and specify them here.
    Static *StaticTLS `json:"static,omitempty"`
}

// StaticTLS represents static TLS
type StaticTLS struct {
    // Member contains secrets containing TLS certs used by each etcd member pod.
    Member *MemberSecret `json:"member,omitempty"`
    // OperatorSecret is the secret containing TLS certs used by the operator to
    // talk securely to this cluster.
    OperatorSecret string `json:"operatorSecret,omitempty"`
}

// MemberSecret represents a member secret
type MemberSecret struct {
    // PeerSecret is the secret containing TLS certs used by each etcd member pod
    // for the communication between etcd peers.
    PeerSecret string `json:"peerSecret,omitempty"`
    // ServerSecret is the secret containing TLS certs used by each etcd member pod
    // for the communication between the etcd server and its clients.
    ServerSecret string `json:"serverSecret,omitempty"`
}

// SelfHostedPolicy represents a self-hosted policy
type SelfHostedPolicy struct {
    // BootMemberClientEndpoint specifies a bootstrap member for the cluster.
    // If there is no bootstrap member, a completely new cluster will be created.
    // The boot member will be removed from the cluster once the self-hosted cluster
    // is set up successfully.
    BootMemberClientEndpoint string `json:"bootMemberClientEndpoint,omitempty"`

    // SkipBootMemberRemoval specifies whether the removal of the bootstrap member
    // should be skipped. By default the operator will automatically remove the
    // bootstrap member from the new cluster - this happens during the pivot
    // procedure and is the first step of decommissioning the bootstrap member.
    // If unspecified, the default is `false`. If set to `true`, you are
    // expected to remove the boot member yourself from the etcd cluster.
    SkipBootMemberRemoval bool `json:"skipBootMemberRemoval,omitempty"`
}
265
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/spec/zz_generated.deepcopy.go
generated
vendored
@ -1,265 +0,0 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package spec

import (
    v1 "k8s.io/api/core/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
    *out = *in
    if in.Pod != nil {
        in, out := &in.Pod, &out.Pod
        if *in == nil {
            *out = nil
        } else {
            *out = new(PodPolicy)
            (*in).DeepCopyInto(*out)
        }
    }
    if in.SelfHosted != nil {
        in, out := &in.SelfHosted, &out.SelfHosted
        if *in == nil {
            *out = nil
        } else {
            *out = new(SelfHostedPolicy)
            **out = **in
        }
    }
    if in.TLS != nil {
        in, out := &in.TLS, &out.TLS
        if *in == nil {
            *out = nil
        } else {
            *out = new(TLSPolicy)
            (*in).DeepCopyInto(*out)
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
func (in *ClusterSpec) DeepCopy() *ClusterSpec {
    if in == nil {
        return nil
    }
    out := new(ClusterSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdCluster) DeepCopyInto(out *EtcdCluster) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdCluster.
func (in *EtcdCluster) DeepCopy() *EtcdCluster {
    if in == nil {
        return nil
    }
    out := new(EtcdCluster)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EtcdCluster) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdClusterList) DeepCopyInto(out *EtcdClusterList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]EtcdCluster, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdClusterList.
func (in *EtcdClusterList) DeepCopy() *EtcdClusterList {
    if in == nil {
        return nil
    }
    out := new(EtcdClusterList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EtcdClusterList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemberSecret) DeepCopyInto(out *MemberSecret) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberSecret.
func (in *MemberSecret) DeepCopy() *MemberSecret {
    if in == nil {
        return nil
    }
    out := new(MemberSecret)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodPolicy) DeepCopyInto(out *PodPolicy) {
    *out = *in
    if in.Labels != nil {
        in, out := &in.Labels, &out.Labels
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.NodeSelector != nil {
        in, out := &in.NodeSelector, &out.NodeSelector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    in.Resources.DeepCopyInto(&out.Resources)
    if in.Tolerations != nil {
        in, out := &in.Tolerations, &out.Tolerations
        *out = make([]v1.Toleration, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.EtcdEnv != nil {
        in, out := &in.EtcdEnv, &out.EtcdEnv
        *out = make([]v1.EnvVar, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.AutomountServiceAccountToken != nil {
        in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
        if *in == nil {
            *out = nil
        } else {
            *out = new(bool)
            **out = **in
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPolicy.
func (in *PodPolicy) DeepCopy() *PodPolicy {
    if in == nil {
        return nil
    }
    out := new(PodPolicy)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfHostedPolicy) DeepCopyInto(out *SelfHostedPolicy) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfHostedPolicy.
|
||||
func (in *SelfHostedPolicy) DeepCopy() *SelfHostedPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SelfHostedPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StaticTLS) DeepCopyInto(out *StaticTLS) {
|
||||
*out = *in
|
||||
if in.Member != nil {
|
||||
in, out := &in.Member, &out.Member
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(MemberSecret)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticTLS.
|
||||
func (in *StaticTLS) DeepCopy() *StaticTLS {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StaticTLS)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TLSPolicy) DeepCopyInto(out *TLSPolicy) {
|
||||
*out = *in
|
||||
if in.Static != nil {
|
||||
in, out := &in.Static, &out.Static
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(StaticTLS)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSPolicy.
|
||||
func (in *TLSPolicy) DeepCopy() *TLSPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TLSPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
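Every function in the deleted file follows the same deepcopy-gen pattern: value fields are copied wholesale, while pointer, map, and slice fields get fresh allocations so the copy never aliases the original. A minimal hand-written sketch of that pattern, using a hypothetical Widget type rather than the deleted etcd spec types:

package main

import "fmt"

// Widget is a hypothetical stand-in for the generated types above.
type Widget struct {
    Name   string
    Labels map[string]string
    Flag   *bool
}

// DeepCopyInto mirrors the deepcopy-gen pattern: allocate new storage for
// reference-typed fields so mutating the copy cannot touch the receiver.
func (in *Widget) DeepCopyInto(out *Widget) {
    *out = *in
    if in.Labels != nil {
        out.Labels = make(map[string]string, len(in.Labels))
        for k, v := range in.Labels {
            out.Labels[k] = v
        }
    }
    if in.Flag != nil {
        out.Flag = new(bool)
        *out.Flag = *in.Flag
    }
}

// DeepCopy allocates the target and delegates to DeepCopyInto, exactly as
// the generated code does.
func (in *Widget) DeepCopy() *Widget {
    if in == nil {
        return nil
    }
    out := new(Widget)
    in.DeepCopyInto(out)
    return out
}

func main() {
    f := true
    w := &Widget{Name: "a", Labels: map[string]string{"k": "v"}, Flag: &f}
    c := w.DeepCopy()
    c.Labels["k"] = "changed"
    fmt.Println(w.Labels["k"]) // still "v": the copy does not alias the original
}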
1
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/BUILD
generated
vendored
@ -19,6 +19,7 @@ go_library(
        "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/kubeconfig:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
36
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go
generated
vendored
@ -26,6 +26,8 @@ import (

    "crypto/rsa"

    "github.com/golang/glog"

    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    certutil "k8s.io/client-go/util/cert"
@ -60,6 +62,7 @@ type kubeConfigSpec struct {
// to establish the control plane, including also the admin kubeconfig file.
// If kubeconfig files already exists, they are used only if evaluated equal; otherwise an error is returned.
func CreateInitKubeConfigFiles(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
    glog.V(1).Infoln("creating all kubeconfig files")
    return createKubeConfigFiles(
        outDir,
        cfg,
@ -73,24 +76,28 @@ func CreateInitKubeConfigFiles(outDir string, cfg *kubeadmapi.MasterConfiguratio
// CreateAdminKubeConfigFile create a kubeconfig file for the admin to use and for kubeadm itself.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateAdminKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
    glog.V(1).Infoln("create a kubeconfig file for the admin and for kubeadm itself")
    return createKubeConfigFiles(outDir, cfg, kubeadmconstants.AdminKubeConfigFileName)
}

// CreateKubeletKubeConfigFile create a kubeconfig file for the Kubelet to use.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateKubeletKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
    glog.V(1).Infoln("creating a kubeconfig file for the Kubelet")
    return createKubeConfigFiles(outDir, cfg, kubeadmconstants.KubeletKubeConfigFileName)
}

// CreateControllerManagerKubeConfigFile create a kubeconfig file for the ControllerManager to use.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateControllerManagerKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
    glog.V(1).Infoln("creating kubeconfig file for the ControllerManager")
    return createKubeConfigFiles(outDir, cfg, kubeadmconstants.ControllerManagerKubeConfigFileName)
}

// CreateSchedulerKubeConfigFile creates a kubeconfig file for the Scheduler to use.
// If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned.
func CreateSchedulerKubeConfigFile(outDir string, cfg *kubeadmapi.MasterConfiguration) error {
    glog.V(1).Infoln("creating kubeconfig file for Scheduler")
    return createKubeConfigFiles(outDir, cfg, kubeadmconstants.SchedulerKubeConfigFileName)
}

@ -112,7 +119,7 @@ func createKubeConfigFiles(outDir string, cfg *kubeadmapi.MasterConfiguration, k
    }

    // builds the KubeConfig object
    config, err := buildKubeConfigFromSpec(spec)
    config, err := buildKubeConfigFromSpec(spec, cfg.ClusterName)
    if err != nil {
        return err
    }
@ -135,7 +142,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.MasterConfiguration) (map[string]*kubeCo
        return nil, fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
    }

    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(&cfg.API)
    if err != nil {
        return nil, err
    }
@ -153,7 +160,7 @@
        kubeadmconstants.KubeletKubeConfigFileName: {
            CACert:     caCert,
            APIServer:  masterEndpoint,
            ClientName: fmt.Sprintf("system:node:%s", cfg.NodeName),
            ClientName: fmt.Sprintf("system:node:%s", cfg.NodeRegistration.Name),
            ClientCertAuth: &clientCertAuth{
                CAKey:         caKey,
                Organizations: []string{kubeadmconstants.NodesGroup},
@ -181,14 +188,14 @@
}

// buildKubeConfigFromSpec creates a kubeconfig object for the given kubeConfigSpec
func buildKubeConfigFromSpec(spec *kubeConfigSpec) (*clientcmdapi.Config, error) {
func buildKubeConfigFromSpec(spec *kubeConfigSpec, clustername string) (*clientcmdapi.Config, error) {

    // If this kubeconfig should use token
    if spec.TokenAuth != nil {
        // create a kubeconfig with a token
        return kubeconfigutil.CreateWithToken(
            spec.APIServer,
            "kubernetes",
            clustername,
            spec.ClientName,
            certutil.EncodeCertPEM(spec.CACert),
            spec.TokenAuth.Token,
@ -209,7 +216,7 @@ func buildKubeConfigFromSpec(spec *kubeConfigSpec) (*clientcmdapi.Config, error)
        // create a kubeconfig with the client certs
        return kubeconfigutil.CreateWithCerts(
            spec.APIServer,
            "kubernetes",
            clustername,
            spec.ClientName,
            certutil.EncodeCertPEM(spec.CACert),
            certutil.EncodePrivateKeyPEM(clientKey),
@ -264,7 +271,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda
}

// WriteKubeConfigWithClientCert writes a kubeconfig file - with a client certificate as authentication info - to the given writer.
func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.MasterConfiguration, clientName string) error {
func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.MasterConfiguration, clientName string, organizations []string) error {

    // creates the KubeConfigSpecs, actualized for the current MasterConfiguration
    caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
@ -272,7 +279,7 @@ func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.MasterConfigur
        return fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
    }

    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(&cfg.API)
    if err != nil {
        return err
    }
@ -282,11 +289,12 @@ func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.MasterConfigur
        APIServer: masterEndpoint,
        CACert:    caCert,
        ClientCertAuth: &clientCertAuth{
            CAKey: caKey,
            CAKey:         caKey,
            Organizations: organizations,
        },
    }

    return writeKubeConfigFromSpec(out, spec)
    return writeKubeConfigFromSpec(out, spec, cfg.ClusterName)
}

// WriteKubeConfigWithToken writes a kubeconfig file - with a token as client authentication info - to the given writer.
@ -298,7 +306,7 @@ func WriteKubeConfigWithToken(out io.Writer, cfg *kubeadmapi.MasterConfiguration
        return fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
    }

    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
    masterEndpoint, err := kubeadmutil.GetMasterEndpoint(&cfg.API)
    if err != nil {
        return err
    }
@ -312,14 +320,14 @@ func WriteKubeConfigWithToken(out io.Writer, cfg *kubeadmapi.MasterConfiguration
        },
    }

    return writeKubeConfigFromSpec(out, spec)
    return writeKubeConfigFromSpec(out, spec, cfg.ClusterName)
}

// writeKubeConfigFromSpec creates a kubeconfig object from a kubeConfigSpec and writes it to the given writer.
func writeKubeConfigFromSpec(out io.Writer, spec *kubeConfigSpec) error {
func writeKubeConfigFromSpec(out io.Writer, spec *kubeConfigSpec, clustername string) error {

    // builds the KubeConfig object
    config, err := buildKubeConfigFromSpec(spec)
    config, err := buildKubeConfigFromSpec(spec, clustername)
    if err != nil {
        return err
    }
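The change above threads a clustername argument through buildKubeConfigFromSpec so the generated kubeconfig names its cluster entry after cfg.ClusterName instead of the hard-coded "kubernetes". A minimal sketch of how such a named config is assembled with client-go's clientcmdapi types (the helper below is illustrative, not the kubeadm kubeconfigutil API):

package main

import (
    "fmt"

    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// buildNamedKubeConfig shows the shape of the change: the cluster entry and
// the context both key off the caller-supplied cluster name.
func buildNamedKubeConfig(server, clusterName, userName string, caPEM []byte) *clientcmdapi.Config {
    cfg := clientcmdapi.NewConfig()
    cfg.Clusters[clusterName] = &clientcmdapi.Cluster{
        Server:                   server,
        CertificateAuthorityData: caPEM,
    }
    contextName := fmt.Sprintf("%s@%s", userName, clusterName)
    cfg.Contexts[contextName] = &clientcmdapi.Context{
        Cluster:  clusterName,
        AuthInfo: userName,
    }
    cfg.CurrentContext = contextName
    return cfg
}

func main() {
    cfg := buildNamedKubeConfig("https://1.2.3.4:6443", "my-cluster", "kubernetes-admin", nil)
    fmt.Println(cfg.CurrentContext) // kubernetes-admin@my-cluster
}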
160
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go
generated
vendored
@ -64,94 +64,76 @@ func TestGetKubeConfigSpecs(t *testing.T) {
    // Adds a pki folder with a ca certs to the temp folder
    pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir)

    // Creates a Master Configuration pointing to the pkidir folder
    cfg := &kubeadmapi.MasterConfiguration{
        API:             kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
        CertificatesDir: pkidir,
        NodeName:        "valid-node-name",
    }

    // Creates a Master Configuration pointing to the pkidir folder
    cfgDNS := &kubeadmapi.MasterConfiguration{
        API:             kubeadmapi.API{ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234},
        CertificatesDir: pkidir,
        NodeName:        "valid-node-name",
    }

    // Executes getKubeConfigSpecs
    specs, err := getKubeConfigSpecs(cfg)
    if err != nil {
        t.Fatal("getKubeConfigSpecs failed!")
    }

    // Executes getKubeConfigSpecs
    specsDNS, err := getKubeConfigSpecs(cfgDNS)
    if err != nil {
        t.Fatal("getKubeConfigSpecs failed!")
    }

    var assertions = []struct {
        kubeConfigFile string
        clientName     string
        organizations  []string
    }{
    // Creates Master Configurations pointing to the pkidir folder
    cfgs := []*kubeadmapi.MasterConfiguration{
        {
            kubeConfigFile: kubeadmconstants.AdminKubeConfigFileName,
            clientName:     "kubernetes-admin",
            organizations:  []string{kubeadmconstants.MastersGroup},
            API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
            CertificatesDir:  pkidir,
            NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"},
        },
        {
            kubeConfigFile: kubeadmconstants.KubeletKubeConfigFileName,
            clientName:     fmt.Sprintf("system:node:%s", cfg.NodeName),
            organizations:  []string{kubeadmconstants.NodesGroup},
            API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234},
            CertificatesDir:  pkidir,
            NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"},
        },
        {
            kubeConfigFile: kubeadmconstants.ControllerManagerKubeConfigFileName,
            clientName:     kubeadmconstants.ControllerManagerUser,
            API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:4321", BindPort: 1234},
            CertificatesDir:  pkidir,
            NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"},
        },
        {
            kubeConfigFile: kubeadmconstants.SchedulerKubeConfigFileName,
            clientName:     kubeadmconstants.SchedulerUser,
            API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234},
            CertificatesDir:  pkidir,
            NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"},
        },
        {
            API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:4321", BindPort: 1234},
            CertificatesDir:  pkidir,
            NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"},
        },
    }

    for _, assertion := range assertions {

        // assert the spec for the kubeConfigFile exists
        if spec, ok := specs[assertion.kubeConfigFile]; ok {

            // Assert clientName
            if spec.ClientName != assertion.clientName {
                t.Errorf("getKubeConfigSpecs for %s clientName is %s, expected %s", assertion.kubeConfigFile, spec.ClientName, assertion.clientName)
            }

            // Assert Organizations
            if spec.ClientCertAuth == nil || !reflect.DeepEqual(spec.ClientCertAuth.Organizations, assertion.organizations) {
                t.Errorf("getKubeConfigSpecs for %s Organizations is %v, expected %v", assertion.kubeConfigFile, spec.ClientCertAuth.Organizations, assertion.organizations)
            }

            // Asserts MasterConfiguration values injected into spec
            masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
            if err != nil {
                t.Error(err)
            }
            if spec.APIServer != masterEndpoint {
                t.Errorf("getKubeConfigSpecs didn't injected cfg.APIServer endpoint into spec for %s", assertion.kubeConfigFile)
            }

            // Asserts CA certs and CA keys loaded into specs
            if spec.CACert == nil {
                t.Errorf("getKubeConfigSpecs didn't loaded CACert into spec for %s!", assertion.kubeConfigFile)
            }
            if spec.ClientCertAuth == nil || spec.ClientCertAuth.CAKey == nil {
                t.Errorf("getKubeConfigSpecs didn't loaded CAKey into spec for %s!", assertion.kubeConfigFile)
            }
        } else {
            t.Errorf("getKubeConfigSpecs didn't create spec for %s ", assertion.kubeConfigFile)
    for _, cfg := range cfgs {
        var assertions = []struct {
            kubeConfigFile string
            clientName     string
            organizations  []string
        }{
            {
                kubeConfigFile: kubeadmconstants.AdminKubeConfigFileName,
                clientName:     "kubernetes-admin",
                organizations:  []string{kubeadmconstants.MastersGroup},
            },
            {
                kubeConfigFile: kubeadmconstants.KubeletKubeConfigFileName,
                clientName:     fmt.Sprintf("system:node:%s", cfg.NodeRegistration.Name),
                organizations:  []string{kubeadmconstants.NodesGroup},
            },
            {
                kubeConfigFile: kubeadmconstants.ControllerManagerKubeConfigFileName,
                clientName:     kubeadmconstants.ControllerManagerUser,
            },
            {
                kubeConfigFile: kubeadmconstants.SchedulerKubeConfigFileName,
                clientName:     kubeadmconstants.SchedulerUser,
            },
        }

        // assert the spec for the kubeConfigFile exists
        if spec, ok := specsDNS[assertion.kubeConfigFile]; ok {
        for _, assertion := range assertions {
            // Executes getKubeConfigSpecs
            specs, err := getKubeConfigSpecs(cfg)
            if err != nil {
                t.Fatal("getKubeConfigSpecs failed!")
            }

            var spec *kubeConfigSpec
            var ok bool

            // assert the spec for the kubeConfigFile exists
            if spec, ok = specs[assertion.kubeConfigFile]; !ok {
                t.Errorf("getKubeConfigSpecs didn't create spec for %s ", assertion.kubeConfigFile)
                continue
            }

            // Assert clientName
            if spec.ClientName != assertion.clientName {
@ -164,7 +146,7 @@ func TestGetKubeConfigSpecs(t *testing.T) {
            }

            // Asserts MasterConfiguration values injected into spec
            masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfgDNS)
            masterEndpoint, err := kubeadmutil.GetMasterEndpoint(&cfg.API)
            if err != nil {
                t.Error(err)
            }
@ -179,8 +161,6 @@
            if spec.ClientCertAuth == nil || spec.ClientCertAuth.CAKey == nil {
                t.Errorf("getKubeConfigSpecs didn't loaded CAKey into spec for %s!", assertion.kubeConfigFile)
            }
        } else {
            t.Errorf("getKubeConfigSpecs didn't create spec for %s ", assertion.kubeConfigFile)
        }
    }
}
@ -190,7 +170,7 @@ func TestBuildKubeConfigFromSpecWithClientAuth(t *testing.T) {
    caCert, caKey := certstestutil.SetupCertificateAuthorithy(t)

    // Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a ClientAuth
    config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myClientName", "myOrg1", "myOrg2")
    config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myClientName", "test-cluster", "myOrg1", "myOrg2")

    // Asserts spec data are propagated to the kubeconfig
    kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
@ -202,7 +182,7 @@
    caCert, _ := certstestutil.SetupCertificateAuthorithy(t)

    // Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a Token
    config := setupdKubeConfigWithTokenAuth(t, caCert, "https://1.2.3.4:1234", "myClientName", "123456")
    config := setupdKubeConfigWithTokenAuth(t, caCert, "https://1.2.3.4:1234", "myClientName", "123456", "test-cluster")

    // Asserts spec data are propagated to the kubeconfig
    kubeconfigtestutil.AssertKubeConfigCurrentCluster(t, config, "https://1.2.3.4:1234", caCert)
@ -216,9 +196,9 @@ func TestCreateKubeConfigFileIfNotExists(t *testing.T) {
    anotherCaCert, anotherCaKey := certstestutil.SetupCertificateAuthorithy(t)

    // build kubeconfigs (to be used to test kubeconfigs equality/not equality)
    config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myOrg1", "myOrg2")
    configWithAnotherClusterCa := setupdKubeConfigWithClientAuth(t, anotherCaCert, anotherCaKey, "https://1.2.3.4:1234", "myOrg1", "myOrg2")
    configWithAnotherClusterAddress := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://3.4.5.6:3456", "myOrg1", "myOrg2")
    config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "test-cluster", "myOrg1", "myOrg2")
    configWithAnotherClusterCa := setupdKubeConfigWithClientAuth(t, anotherCaCert, anotherCaKey, "https://1.2.3.4:1234", "test-cluster", "myOrg1", "myOrg2")
    configWithAnotherClusterAddress := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://3.4.5.6:3456", "myOrg1", "test-cluster", "myOrg2")

    var tests = []struct {
        existingKubeConfig *clientcmdapi.Config
@ -355,7 +335,7 @@ func TestWriteKubeConfigFailsIfCADoesntExists(t *testing.T) {
    }{
        { // Test WriteKubeConfigWithClientCert
            writeKubeConfigFunction: func(out io.Writer) error {
                return WriteKubeConfigWithClientCert(out, cfg, "myUser")
                return WriteKubeConfigWithClientCert(out, cfg, "myUser", []string{"myOrg"})
            },
        },
        { // Test WriteKubeConfigWithToken
@ -403,7 +383,7 @@ func TestWriteKubeConfig(t *testing.T) {
    }{
        { // Test WriteKubeConfigWithClientCert
            writeKubeConfigFunction: func(out io.Writer) error {
                return WriteKubeConfigWithClientCert(out, cfg, "myUser")
                return WriteKubeConfigWithClientCert(out, cfg, "myUser", []string{"myOrg"})
            },
            withClientCert: true,
        },
@ -447,7 +427,7 @@ func TestWriteKubeConfig(t *testing.T) {
}

// setupdKubeConfigWithClientAuth is a test utility function that wraps buildKubeConfigFromSpec for building a KubeConfig object With ClientAuth
func setupdKubeConfigWithClientAuth(t *testing.T, caCert *x509.Certificate, caKey *rsa.PrivateKey, APIServer, clientName string, organizations ...string) *clientcmdapi.Config {
func setupdKubeConfigWithClientAuth(t *testing.T, caCert *x509.Certificate, caKey *rsa.PrivateKey, APIServer, clientName, clustername string, organizations ...string) *clientcmdapi.Config {
    spec := &kubeConfigSpec{
        CACert:    caCert,
        APIServer: APIServer,
@ -458,7 +438,7 @@ func setupdKubeConfigWithClientAuth(t *testing.T, caCert *x509.Certificate, caKe
        },
    }

    config, err := buildKubeConfigFromSpec(spec)
    config, err := buildKubeConfigFromSpec(spec, clustername)
    if err != nil {
        t.Fatal("buildKubeConfigFromSpec failed!")
    }
@ -467,7 +447,7 @@ func setupdKubeConfigWithClientAuth(t *testing.T, caCert *x509.Certificate, caKe
}

// setupdKubeConfigWithTokenAuth is a test utility function that wraps buildKubeConfigFromSpec for building a KubeConfig object With Token
func setupdKubeConfigWithTokenAuth(t *testing.T, caCert *x509.Certificate, APIServer, clientName, token string) *clientcmdapi.Config {
func setupdKubeConfigWithTokenAuth(t *testing.T, caCert *x509.Certificate, APIServer, clientName, token, clustername string) *clientcmdapi.Config {
    spec := &kubeConfigSpec{
        CACert:    caCert,
        APIServer: APIServer,
@ -477,7 +457,7 @@ func setupdKubeConfigWithTokenAuth(t *testing.T, caCert *x509.Certificate, APISe
        },
    }

    config, err := buildKubeConfigFromSpec(spec)
    config, err := buildKubeConfigFromSpec(spec, clustername)
    if err != nil {
        t.Fatal("buildKubeConfigFromSpec failed!")
    }
26
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/BUILD
generated
vendored
@ -2,42 +2,56 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["kubelet.go"],
    srcs = [
        "config.go",
        "dynamic.go",
        "flags.go",
    ],
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet",
    visibility = ["//visibility:public"],
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//cmd/kubeadm/app/util/kubeconfig:go_default_library",
        "//pkg/apis/rbac/v1:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
        "//pkg/util/node:go_default_library",
        "//pkg/util/procfs:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["kubelet_test.go"],
    srcs = [
        "config_test.go",
        "dynamic_test.go",
        "flags_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)
180
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/config.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
    rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
    kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
    kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
    "k8s.io/kubernetes/pkg/util/version"
)

// WriteConfigToDisk writes the kubelet config object down to a file
// Used at "kubeadm init" and "kubeadm upgrade" time
func WriteConfigToDisk(kubeletConfig *kubeletconfigv1beta1.KubeletConfiguration, kubeletDir string) error {

    kubeletBytes, err := getConfigBytes(kubeletConfig)
    if err != nil {
        return err
    }
    return writeConfigBytesToDisk(kubeletBytes, kubeletDir)
}

// CreateConfigMap creates a ConfigMap with the generic kubelet configuration.
// Used at "kubeadm init" and "kubeadm upgrade" time
func CreateConfigMap(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {

    k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
    if err != nil {
        return err
    }

    configMapName := configMapName(k8sVersion)
    fmt.Printf("[kubelet] Creating a ConfigMap %q in namespace %s with the configuration for the kubelets in the cluster\n", configMapName, metav1.NamespaceSystem)

    kubeletBytes, err := getConfigBytes(cfg.KubeletConfiguration.BaseConfig)
    if err != nil {
        return err
    }

    if err := apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name:      configMapName,
            Namespace: metav1.NamespaceSystem,
        },
        Data: map[string]string{
            kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes),
        },
    }); err != nil {
        return err
    }

    if err := createConfigMapRBACRules(client, k8sVersion); err != nil {
        return fmt.Errorf("error creating kubelet configuration configmap RBAC rules: %v", err)
    }
    return nil
}

// createConfigMapRBACRules creates the RBAC rules for exposing the base kubelet ConfigMap in the kube-system namespace to unauthenticated users
func createConfigMapRBACRules(client clientset.Interface, k8sVersion *version.Version) error {
    if err := apiclient.CreateOrUpdateRole(client, &rbac.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name:      configMapRBACName(k8sVersion),
            Namespace: metav1.NamespaceSystem,
        },
        Rules: []rbac.PolicyRule{
            rbachelper.NewRule("get").Groups("").Resources("configmaps").Names(configMapName(k8sVersion)).RuleOrDie(),
        },
    }); err != nil {
        return err
    }

    return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name:      configMapRBACName(k8sVersion),
            Namespace: metav1.NamespaceSystem,
        },
        RoleRef: rbac.RoleRef{
            APIGroup: rbac.GroupName,
            Kind:     "Role",
            Name:     configMapRBACName(k8sVersion),
        },
        Subjects: []rbac.Subject{
            {
                Kind: rbac.GroupKind,
                Name: kubeadmconstants.NodesGroup,
            },
            {
                Kind: rbac.GroupKind,
                Name: kubeadmconstants.NodeBootstrapTokenAuthGroup,
            },
        },
    })
}

// DownloadConfig downloads the kubelet configuration from a ConfigMap and writes it to disk.
// Used at "kubeadm join" time
func DownloadConfig(client clientset.Interface, kubeletVersion *version.Version, kubeletDir string) error {

    // Download the ConfigMap from the cluster based on what version the kubelet is
    configMapName := configMapName(kubeletVersion)

    fmt.Printf("[kubelet] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n",
        configMapName, metav1.NamespaceSystem)

    kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
    // If the ConfigMap wasn't found and the kubelet version is v1.10.x, where we didn't support the config file yet
    // just return, don't error out
    if apierrors.IsNotFound(err) && kubeletVersion.Minor() == 10 {
        return nil
    }
    if err != nil {
        return err
    }

    return writeConfigBytesToDisk([]byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey]), kubeletDir)
}

// configMapName returns the right ConfigMap name for the right branch of k8s
func configMapName(k8sVersion *version.Version) string {
    return fmt.Sprintf("%s%d.%d", kubeadmconstants.KubeletBaseConfigurationConfigMapPrefix, k8sVersion.Major(), k8sVersion.Minor())
}

// configMapRBACName returns the name for the Role/RoleBinding for the kubelet config configmap for the right branch of k8s
func configMapRBACName(k8sVersion *version.Version) string {
    return fmt.Sprintf("%s%d.%d", kubeadmconstants.KubeletBaseConfigMapRolePrefix, k8sVersion.Major(), k8sVersion.Minor())
}

// getConfigBytes marshals a kubeletconfiguration object to bytes
func getConfigBytes(kubeletConfig *kubeletconfigv1beta1.KubeletConfiguration) ([]byte, error) {
    _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
    if err != nil {
        return []byte{}, err
    }

    return kubeadmutil.MarshalToYamlForCodecs(kubeletConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs)
}

// writeConfigBytesToDisk writes a byte slice down to disk at the specific location of the kubelet config file
func writeConfigBytesToDisk(b []byte, kubeletDir string) error {
    configFile := filepath.Join(kubeletDir, kubeadmconstants.KubeletConfigurationFileName)
    fmt.Printf("[kubelet] Writing kubelet configuration to file %q\n", configFile)

    // creates target folder if not already exists
    if err := os.MkdirAll(kubeletDir, 0700); err != nil {
        return fmt.Errorf("failed to create directory %q: %v", kubeletDir, err)
    }

    if err := ioutil.WriteFile(configFile, b, 0644); err != nil {
        return fmt.Errorf("failed to write kubelet configuration to the file %q: %v", configFile, err)
    }
    return nil
}
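config.go keys the kubelet ConfigMap, Role, and RoleBinding names off the major.minor of the control-plane version, so every patch release of a minor shares one base config (e.g. kubelet-config-1.11). A standalone sketch of that naming step, with the prefix constant from cmd/kubeadm/app/constants inlined as an assumption:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/util/version"
)

// Inlined for the sketch; the real value lives in cmd/kubeadm/app/constants.
const kubeletBaseConfigurationConfigMapPrefix = "kubelet-config-"

func configMapNameForVersion(v string) (string, error) {
    parsed, err := version.ParseSemantic(v)
    if err != nil {
        return "", err
    }
    // Only major.minor matter: every patch release of a minor shares one ConfigMap.
    return fmt.Sprintf("%s%d.%d", kubeletBaseConfigurationConfigMapPrefix, parsed.Major(), parsed.Minor()), nil
}

func main() {
    name, _ := configMapNameForVersion("v1.11.3")
    fmt.Println(name) // kubelet-config-1.11
}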
78
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/config_test.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
    "k8s.io/kubernetes/pkg/util/version"
)

func TestCreateConfigMap(t *testing.T) {
    nodeName := "fake-node"
    client := fake.NewSimpleClientset()
    cfg := &kubeadmapi.MasterConfiguration{
        NodeRegistration:  kubeadmapi.NodeRegistrationOptions{Name: nodeName},
        KubernetesVersion: "v1.11.0",
        KubeletConfiguration: kubeadmapi.KubeletConfiguration{
            BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{},
        },
    }

    client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: nodeName,
            },
            Spec: v1.NodeSpec{},
        }, nil
    })
    client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })

    if err := CreateConfigMap(cfg, client); err != nil {
        t.Errorf("CreateConfigMap: unexpected error %v", err)
    }
}

func TestCreateConfigMapRBACRules(t *testing.T) {
    client := fake.NewSimpleClientset()
    client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })

    if err := createConfigMapRBACRules(client, version.MustParseSemantic("v1.11.0")); err != nil {
        t.Errorf("createConfigMapRBACRules: unexpected error %v", err)
    }
}
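The tests above lean on client-go's fake clientset: a prepended reactor intercepts a verb/resource pair and returns a canned object, so the phase code runs without a real API server. A minimal self-contained sketch of the same reactor pattern, assuming the vendored client-go version (whose Get takes no context argument):

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
)

func main() {
    client := fake.NewSimpleClientset()

    // Intercept every GET on configmaps and serve a canned object instead.
    client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{Name: "kubelet-config-1.11", Namespace: metav1.NamespaceSystem},
            Data:       map[string]string{"kubelet": "kind: KubeletConfiguration"},
        }, nil
    })

    cm, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get("kubelet-config-1.11", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println(cm.Data["kubelet"]) // served by the reactor, no API server involved
}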
61
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/dynamic.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
    "k8s.io/kubernetes/pkg/util/version"
)

// EnableDynamicConfigForNode updates the Node's ConfigSource to enable Dynamic Kubelet Configuration, depending on what version the kubelet is
// Used at "kubeadm init", "kubeadm join" and "kubeadm upgrade" time
// This func is ONLY run if the user enables the `DynamicKubeletConfig` feature gate, which is by default off
func EnableDynamicConfigForNode(client clientset.Interface, nodeName string, kubeletVersion *version.Version) error {

    configMapName := configMapName(kubeletVersion)
    fmt.Printf("[kubelet] Enabling Dynamic Kubelet Config for Node %q; config sourced from ConfigMap %q in namespace %s\n",
        nodeName, configMapName, metav1.NamespaceSystem)
    fmt.Println("[kubelet] WARNING: The Dynamic Kubelet Config feature is alpha and off by default. It hasn't been well-tested yet at this stage, use with caution.")

    kubeletConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("couldn't get the kubelet configuration ConfigMap: %v", err)
    }

    // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
    return apiclient.PatchNode(client, nodeName, func(n *v1.Node) {
        patchNodeForDynamicConfig(n, configMapName, kubeletConfigMap.UID)
    })
}

func patchNodeForDynamicConfig(n *v1.Node, configMapName string, configMapUID types.UID) {
    n.Spec.ConfigSource = &v1.NodeConfigSource{
        ConfigMap: &v1.ConfigMapNodeConfigSource{
            Name:             configMapName,
            Namespace:        metav1.NamespaceSystem,
            UID:              configMapUID,
            KubeletConfigKey: kubeadmconstants.KubeletBaseConfigurationConfigMapKey,
        },
    }
}
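EnableDynamicConfigForNode ultimately writes a single nested ConfigMap reference into Node.Spec.ConfigSource. A sketch that marshals the same structure, to show the payload the node patch carries (the name, key, and UID below are placeholders; the real code copies the live ConfigMap's UID and the kubeadm constant):

package main

import (
    "encoding/json"
    "fmt"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    src := &v1.NodeConfigSource{
        ConfigMap: &v1.ConfigMapNodeConfigSource{
            Name:             "kubelet-config-1.11", // placeholder ConfigMap name
            Namespace:        metav1.NamespaceSystem,
            UID:              "fake-uid", // placeholder; the real code copies the live ConfigMap's UID
            KubeletConfigKey: "kubelet",  // placeholder for the kubeadm constant
        },
    }
    b, err := json.MarshalIndent(src, "", "  ")
    if err != nil {
        panic(err)
    }
    // This is the value the patch writes into node.spec.configSource.
    fmt.Println(string(b))
}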
65
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/dynamic_test.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/pkg/util/version"
)

func TestEnableDynamicConfigForNode(t *testing.T) {
    nodeName := "fake-node"
    client := fake.NewSimpleClientset()
    client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name:   nodeName,
                Labels: map[string]string{kubeletapis.LabelHostname: nodeName},
            },
            Spec: v1.NodeSpec{
                ConfigSource: &v1.NodeConfigSource{
                    ConfigMap: &v1.ConfigMapNodeConfigSource{
                        UID: "",
                    },
                },
            },
        }, nil
    })
    client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "kubelet-config-1.11",
                Namespace: metav1.NamespaceSystem,
                UID:       "fake-uid",
            },
        }, nil
    })
    client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })

    if err := EnableDynamicConfigForNode(client, nodeName, version.MustParseSemantic("v1.11.0")); err != nil {
        t.Errorf("UpdateNodeWithConfigMap: unexpected error %v", err)
    }
}
130
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/flags.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/golang/glog"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
    "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/features"
    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    nodeutil "k8s.io/kubernetes/pkg/util/node"
    "k8s.io/kubernetes/pkg/util/procfs"
    utilsexec "k8s.io/utils/exec"
)

type kubeletFlagsOpts struct {
    nodeRegOpts              *kubeadmapi.NodeRegistrationOptions
    featureGates             map[string]bool
    registerTaintsUsingFlags bool
    execer                   utilsexec.Interface
    pidOfFunc                func(string) ([]int, error)
    defaultHostname          string
}

// WriteKubeletDynamicEnvFile writes an environment file with dynamic flags to the kubelet.
// Used at "kubeadm init" and "kubeadm join" time.
func WriteKubeletDynamicEnvFile(nodeRegOpts *kubeadmapi.NodeRegistrationOptions, featureGates map[string]bool, registerTaintsUsingFlags bool, kubeletDir string) error {

    flagOpts := kubeletFlagsOpts{
        nodeRegOpts:              nodeRegOpts,
        featureGates:             featureGates,
        registerTaintsUsingFlags: registerTaintsUsingFlags,
        execer:                   utilsexec.New(),
        pidOfFunc:                procfs.PidOf,
        defaultHostname:          nodeutil.GetHostname(""),
    }
    stringMap := buildKubeletArgMap(flagOpts)
    argList := kubeadmutil.BuildArgumentListFromMap(stringMap, nodeRegOpts.KubeletExtraArgs)
    envFileContent := fmt.Sprintf("%s=%s\n", constants.KubeletEnvFileVariableName, strings.Join(argList, " "))

    return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir)
}

// buildKubeletArgMap takes a kubeletFlagsOpts object and builds based on that a string-string map with flags
// that should be given to the local kubelet daemon.
func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string {
    kubeletFlags := map[string]string{}

    if opts.nodeRegOpts.CRISocket == kubeadmapiv1alpha2.DefaultCRISocket {
        // These flags should only be set when running docker
        kubeletFlags["network-plugin"] = "cni"
        kubeletFlags["cni-conf-dir"] = "/etc/cni/net.d"
        kubeletFlags["cni-bin-dir"] = "/opt/cni/bin"
        driver, err := kubeadmutil.GetCgroupDriverDocker(opts.execer)
        if err != nil {
            glog.Warningf("cannot automatically assign a '--cgroup-driver' value when starting the Kubelet: %v\n", err)
        } else {
            kubeletFlags["cgroup-driver"] = driver
        }
    } else {
        kubeletFlags["container-runtime"] = "remote"
        kubeletFlags["container-runtime-endpoint"] = opts.nodeRegOpts.CRISocket
    }

    if opts.registerTaintsUsingFlags && opts.nodeRegOpts.Taints != nil && len(opts.nodeRegOpts.Taints) > 0 {
        taintStrs := []string{}
        for _, taint := range opts.nodeRegOpts.Taints {
            taintStrs = append(taintStrs, taint.ToString())
        }

        kubeletFlags["register-with-taints"] = strings.Join(taintStrs, ",")
    }

    if pids, _ := opts.pidOfFunc("systemd-resolved"); len(pids) > 0 {
        // procfs.PidOf only returns an error if the regex is empty or doesn't compile, so we can ignore it
        kubeletFlags["resolv-conf"] = "/run/systemd/resolve/resolv.conf"
    }

    // Make sure the node name we're passed will work with Kubelet
    if opts.nodeRegOpts.Name != "" && opts.nodeRegOpts.Name != opts.defaultHostname {
        glog.V(1).Infof("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
        kubeletFlags["hostname-override"] = opts.nodeRegOpts.Name
    }

    // If the user enabled Dynamic Kubelet Configuration (which is disabled by default), set the directory
    // in the CLI flags so that the feature actually gets enabled
    if features.Enabled(opts.featureGates, features.DynamicKubeletConfig) {
        kubeletFlags["dynamic-config-dir"] = filepath.Join(constants.KubeletRunDirectory, constants.DynamicKubeletConfigurationDirectoryName)
    }

    // TODO: Conditionally set `--cgroup-driver` to either `systemd` or `cgroupfs` for CRI other than Docker

    return kubeletFlags
}

// writeKubeletFlagBytesToDisk writes a byte slice down to disk at the specific location of the kubelet flag overrides file
func writeKubeletFlagBytesToDisk(b []byte, kubeletDir string) error {
    kubeletEnvFilePath := filepath.Join(kubeletDir, constants.KubeletEnvFileName)
    fmt.Printf("[kubelet] Writing kubelet environment file with flags to file %q\n", kubeletEnvFilePath)

    // creates target folder if not already exists
    if err := os.MkdirAll(kubeletDir, 0700); err != nil {
        return fmt.Errorf("failed to create directory %q: %v", kubeletDir, err)
    }
    if err := ioutil.WriteFile(kubeletEnvFilePath, b, 0644); err != nil {
        return fmt.Errorf("failed to write kubelet configuration to the file %q: %v", kubeletEnvFilePath, err)
    }
    return nil
}
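buildKubeletArgMap only produces a flat flag map; WriteKubeletDynamicEnvFile then renders it into one KUBELET_KUBEADM_ARGS line that the kubelet's systemd drop-in sources. A standalone sketch of that rendering step, with the env variable name inlined as an assumption (the real code uses constants.KubeletEnvFileVariableName and kubeadm's own argument builder, whose ordering rules may differ):

package main

import (
    "fmt"
    "sort"
    "strings"
)

// renderEnvFile mimics the flag-map-to-env-file step: every entry becomes
// --key=value, joined into one variable a kubelet unit file can source.
func renderEnvFile(flags map[string]string) string {
    keys := make([]string, 0, len(flags))
    for k := range flags {
        keys = append(keys, k)
    }
    sort.Strings(keys) // deterministic order for the sketch
    args := make([]string, 0, len(keys))
    for _, k := range keys {
        args = append(args, fmt.Sprintf("--%s=%s", k, flags[k]))
    }
    return fmt.Sprintf("KUBELET_KUBEADM_ARGS=%s\n", strings.Join(args, " "))
}

func main() {
    fmt.Print(renderEnvFile(map[string]string{
        "network-plugin": "cni",
        "cgroup-driver":  "systemd",
    }))
    // KUBELET_KUBEADM_ARGS=--cgroup-driver=systemd --network-plugin=cni
}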
277
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/flags_test.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "context"
    "errors"
    "fmt"
    "io"
    "reflect"
    "strings"
    "testing"

    "k8s.io/api/core/v1"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    "k8s.io/utils/exec"
)

type fakeCmd struct {
    b   []byte
    err error
}

func (f fakeCmd) Run() error                      { return f.err }
func (f fakeCmd) CombinedOutput() ([]byte, error) { return f.b, f.err }
func (f fakeCmd) Output() ([]byte, error)         { return f.b, f.err }
func (f fakeCmd) SetDir(dir string)               {}
func (f fakeCmd) SetStdin(in io.Reader)           {}
func (f fakeCmd) SetStdout(out io.Writer)         {}
func (f fakeCmd) SetStderr(out io.Writer)         {}
func (f fakeCmd) Stop()                           {}

type fakeExecer struct {
    ioMap map[string]fakeCmd
}

func (f fakeExecer) Command(cmd string, args ...string) exec.Cmd {
    cmds := []string{cmd}
    cmds = append(cmds, args...)
    return f.ioMap[strings.Join(cmds, " ")]
}
func (f fakeExecer) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd {
    return f.Command(cmd, args...)
}
func (f fakeExecer) LookPath(file string) (string, error) { return "", errors.New("unknown binary") }

var (
    systemdCgroupExecer = fakeExecer{
        ioMap: map[string]fakeCmd{
            "docker info": {
                b: []byte(`Cgroup Driver: systemd`),
            },
        },
    }

    cgroupfsCgroupExecer = fakeExecer{
        ioMap: map[string]fakeCmd{
            "docker info": {
                b: []byte(`Cgroup Driver: cgroupfs`),
            },
        },
    }

    errCgroupExecer = fakeExecer{
        ioMap: map[string]fakeCmd{
            "docker info": {
                err: fmt.Errorf("no such binary: docker"),
            },
        },
    }
)

func binaryRunningPidOfFunc(_ string) ([]int, error) {
    return []int{1, 2, 3}, nil
}

func binaryNotRunningPidOfFunc(_ string) ([]int, error) {
    return []int{}, nil
}

func TestBuildKubeletArgMap(t *testing.T) {

    tests := []struct {
        name     string
        opts     kubeletFlagsOpts
        expected map[string]string
    }{
        {
            name: "the simplest case",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/dockershim.sock",
                    Name:      "foo",
                    Taints: []v1.Taint{ // This should be ignored as registerTaintsUsingFlags is false
                        {
                            Key:    "foo",
                            Value:  "bar",
                            Effect: "baz",
                        },
                    },
                },
                execer:          errCgroupExecer,
                pidOfFunc:       binaryNotRunningPidOfFunc,
                defaultHostname: "foo",
            },
            expected: map[string]string{
                "network-plugin": "cni",
                "cni-conf-dir":   "/etc/cni/net.d",
                "cni-bin-dir":    "/opt/cni/bin",
            },
        },
        {
            name: "nodeRegOpts.Name != default hostname",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/dockershim.sock",
                    Name:      "override-name",
                },
                execer:          errCgroupExecer,
                pidOfFunc:       binaryNotRunningPidOfFunc,
                defaultHostname: "default",
            },
            expected: map[string]string{
                "network-plugin":    "cni",
                "cni-conf-dir":      "/etc/cni/net.d",
                "cni-bin-dir":       "/opt/cni/bin",
                "hostname-override": "override-name",
            },
        },
        {
            name: "systemd cgroup driver",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/dockershim.sock",
                    Name:      "foo",
                },
                execer:          systemdCgroupExecer,
                pidOfFunc:       binaryNotRunningPidOfFunc,
                defaultHostname: "foo",
            },
            expected: map[string]string{
                "network-plugin": "cni",
                "cni-conf-dir":   "/etc/cni/net.d",
                "cni-bin-dir":    "/opt/cni/bin",
                "cgroup-driver":  "systemd",
            },
        },
        {
            name: "cgroupfs cgroup driver",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/dockershim.sock",
                    Name:      "foo",
                },
                execer:          cgroupfsCgroupExecer,
                pidOfFunc:       binaryNotRunningPidOfFunc,
                defaultHostname: "foo",
            },
            expected: map[string]string{
                "network-plugin": "cni",
                "cni-conf-dir":   "/etc/cni/net.d",
                "cni-bin-dir":    "/opt/cni/bin",
                "cgroup-driver":  "cgroupfs",
            },
        },
        {
            name: "external CRI runtime",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/containerd.sock",
                    Name:      "foo",
                },
                execer:          cgroupfsCgroupExecer,
                pidOfFunc:       binaryNotRunningPidOfFunc,
                defaultHostname: "foo",
            },
            expected: map[string]string{
                "container-runtime":          "remote",
                "container-runtime-endpoint": "/var/run/containerd.sock",
            },
        },
        {
            name: "register with taints",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/containerd.sock",
                    Name:      "foo",
                    Taints: []v1.Taint{
                        {
                            Key:    "foo",
                            Value:  "bar",
                            Effect: "baz",
                        },
                        {
                            Key:    "key",
                            Value:  "val",
                            Effect: "eff",
                        },
                    },
                },
                registerTaintsUsingFlags: true,
                execer:                   cgroupfsCgroupExecer,
                pidOfFunc:                binaryNotRunningPidOfFunc,
                defaultHostname:          "foo",
            },
            expected: map[string]string{
                "container-runtime":          "remote",
                "container-runtime-endpoint": "/var/run/containerd.sock",
                "register-with-taints":       "foo=bar:baz,key=val:eff",
            },
        },
        {
            name: "systemd-resolved running",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/containerd.sock",
                    Name:      "foo",
                },
                execer:          cgroupfsCgroupExecer,
                pidOfFunc:       binaryRunningPidOfFunc,
                defaultHostname: "foo",
            },
            expected: map[string]string{
                "container-runtime":          "remote",
                "container-runtime-endpoint": "/var/run/containerd.sock",
                "resolv-conf":                "/run/systemd/resolve/resolv.conf",
            },
        },
        {
            name: "dynamic kubelet config enabled",
            opts: kubeletFlagsOpts{
                nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
                    CRISocket: "/var/run/containerd.sock",
                    Name:      "foo",
                },
                featureGates: map[string]bool{
|
||||
"DynamicKubeletConfig": true,
|
||||
},
|
||||
execer: cgroupfsCgroupExecer,
|
||||
pidOfFunc: binaryNotRunningPidOfFunc,
|
||||
defaultHostname: "foo",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"container-runtime": "remote",
|
||||
"container-runtime-endpoint": "/var/run/containerd.sock",
|
||||
"dynamic-config-dir": "/var/lib/kubelet/dynamic-config",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
actual := buildKubeletArgMap(test.opts)
|
||||
if !reflect.DeepEqual(actual, test.expected) {
|
||||
t.Errorf(
|
||||
"failed buildKubeletArgMap:\n\texpected: %v\n\t actual: %v",
|
||||
test.expected,
|
||||
actual,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
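A minimal, self-contained sketch (an editorial illustration, not part of the vendored diff) of the fake-execer pattern the test above relies on: a map from the joined command line to a canned result, so the cgroup-driver probe can be exercised without Docker installed.

// Sketch only; mirrors the fakeCmd/fakeExecer types defined in the test file above.
package main

import (
    "fmt"
    "strings"
)

type fakeCmd struct {
    b   []byte
    err error
}

type fakeExecer struct {
    ioMap map[string]fakeCmd
}

// lookup mirrors fakeExecer.Command above: join argv with spaces and return the
// canned reply; unknown commands yield the zero value (empty output, nil error).
func (f fakeExecer) lookup(cmd string, args ...string) fakeCmd {
    return f.ioMap[strings.Join(append([]string{cmd}, args...), " ")]
}

func main() {
    f := fakeExecer{ioMap: map[string]fakeCmd{
        "docker info": {b: []byte("Cgroup Driver: systemd")},
    }}
    res := f.lookup("docker", "info")
    fmt.Println(string(res.b), res.err) // Cgroup Driver: systemd <nil>
}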
234
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/kubelet.go
generated
vendored
234
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/kubelet.go
generated
vendored
@ -1,234 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
    kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
    rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
    kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
    kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
)

// CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature.
func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
    fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster\n",
        kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)

    _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
    if err != nil {
        return err
    }
    kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs)
    if err != nil {
        return err
    }

    if err = apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name:      kubeadmconstants.KubeletBaseConfigurationConfigMap,
            Namespace: metav1.NamespaceSystem,
        },
        Data: map[string]string{
            kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes),
        },
    }); err != nil {
        return err
    }

    if err := createKubeletBaseConfigMapRBACRules(client); err != nil {
        return fmt.Errorf("error creating base kubelet configmap RBAC rules: %v", err)
    }

    return updateNodeWithConfigMap(client, cfg.NodeName)
}

// ConsumeBaseKubeletConfiguration consumes base kubelet configuration for dynamic kubelet configuration feature.
func ConsumeBaseKubeletConfiguration(nodeName string) error {
    client, err := getLocalNodeTLSBootstrappedClient()
    if err != nil {
        return err
    }

    kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.GetOptions{})
    if err != nil {
        return err
    }

    if err := writeInitKubeletConfigToDisk([]byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey])); err != nil {
        return fmt.Errorf("failed to write initial remote configuration of kubelet to disk for node %s: %v", nodeName, err)
    }

    return updateNodeWithConfigMap(client, nodeName)
}

// updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap
func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error {
    fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s\n",
        nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)

    // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
    return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.UpdateNodeTimeout, func() (bool, error) {
        node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            return false, nil
        }

        oldData, err := json.Marshal(node)
        if err != nil {
            return false, err
        }

        kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.GetOptions{})
        if err != nil {
            return false, nil
        }

        node.Spec.ConfigSource = &v1.NodeConfigSource{
            ConfigMapRef: &v1.ObjectReference{
                Name:      kubeadmconstants.KubeletBaseConfigurationConfigMap,
                Namespace: metav1.NamespaceSystem,
                UID:       kubeletCfg.UID,
            },
        }

        newData, err := json.Marshal(node)
        if err != nil {
            return false, err
        }

        patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
        if err != nil {
            return false, err
        }

        if _, err := client.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
            if apierrs.IsConflict(err) {
                fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)")
                return false, nil
            }
            return false, err
        }

        return true, nil
    })
}

// createKubeletBaseConfigMapRBACRules creates the RBAC rules for exposing the base kubelet ConfigMap in the kube-system namespace to unauthenticated users
func createKubeletBaseConfigMapRBACRules(client clientset.Interface) error {
    if err := apiclient.CreateOrUpdateRole(client, &rbac.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name:      kubeadmconstants.KubeletBaseConfigMapRoleName,
            Namespace: metav1.NamespaceSystem,
        },
        Rules: []rbac.PolicyRule{
            rbachelper.NewRule("get").Groups("").Resources("configmaps").Names(kubeadmconstants.KubeletBaseConfigurationConfigMap).RuleOrDie(),
        },
    }); err != nil {
        return err
    }

    return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name:      kubeadmconstants.KubeletBaseConfigMapRoleName,
            Namespace: metav1.NamespaceSystem,
        },
        RoleRef: rbac.RoleRef{
            APIGroup: rbac.GroupName,
            Kind:     "Role",
            Name:     kubeadmconstants.KubeletBaseConfigMapRoleName,
        },
        Subjects: []rbac.Subject{
            {
                Kind: rbac.GroupKind,
                Name: kubeadmconstants.NodesGroup,
            },
            {
                Kind: rbac.GroupKind,
                Name: kubeadmconstants.NodeBootstrapTokenAuthGroup,
            },
        },
    })
}

// getLocalNodeTLSBootstrappedClient waits for the kubelet to perform the TLS bootstrap
// and then creates a client from config file /etc/kubernetes/kubelet.conf
func getLocalNodeTLSBootstrappedClient() (clientset.Interface, error) {
    fmt.Println("[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...")

    kubeletKubeConfig := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName)

    // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
    err := wait.PollImmediateInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) {
        _, err := os.Stat(kubeletKubeConfig)
        return (err == nil), nil
    })
    if err != nil {
        return nil, err
    }

    return kubeconfigutil.ClientSetFromFile(kubeletKubeConfig)
}

// WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master.
func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error {
    fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s\n", cfg.NodeName)

    _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
    if err != nil {
        return err
    }

    kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs)
    if err != nil {
        return err
    }

    if err := writeInitKubeletConfigToDisk(kubeletBytes); err != nil {
        return fmt.Errorf("failed to write base configuration of kubelet to disk on master node %s: %v", cfg.NodeName, err)
    }

    return nil
}

func writeInitKubeletConfigToDisk(kubeletConfig []byte) error {
    if err := os.MkdirAll(kubeadmconstants.KubeletBaseConfigurationDir, 0644); err != nil {
        return fmt.Errorf("failed to create directory %q: %v", kubeadmconstants.KubeletBaseConfigurationDir, err)
    }
    baseConfigFile := filepath.Join(kubeadmconstants.KubeletBaseConfigurationDir, kubeadmconstants.KubeletBaseConfigurationFile)
    if err := ioutil.WriteFile(baseConfigFile, kubeletConfig, 0644); err != nil {
        return fmt.Errorf("failed to write initial remote configuration of kubelet into file %q: %v", baseConfigFile, err)
    }
    return nil
}
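A sketch (editorial, not part of the diff) of the marshal-mutate-diff-patch pattern updateNodeWithConfigMap uses above: serialize the object before and after the in-memory mutation, then let strategicpatch compute the minimal merge patch to send to the API server. Field names follow the vendored API version shown in this diff.

package main

import (
    "encoding/json"
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
    node := v1.Node{}
    oldData, _ := json.Marshal(node)

    // Mutate: point the node at a config source, as the deleted code did.
    node.Spec.ConfigSource = &v1.NodeConfigSource{
        ConfigMapRef: &v1.ObjectReference{Name: "kubelet-base-config", Namespace: "kube-system"},
    }
    newData, _ := json.Marshal(node)

    // The resulting patch contains only the changed fields.
    patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(patch)) // {"spec":{"configSource":{...}}}
}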
134
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/kubelet_test.go
generated
vendored
134
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet/kubelet_test.go
generated
vendored
@ -1,134 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
)

func TestCreateBaseKubeletConfiguration(t *testing.T) {
    nodeName := "fake-node"
    client := fake.NewSimpleClientset()
    cfg := &kubeadmapi.MasterConfiguration{
        NodeName: nodeName,
        KubeletConfiguration: kubeadmapi.KubeletConfiguration{
            BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{
                TypeMeta: metav1.TypeMeta{
                    Kind: "KubeletConfiguration",
                },
            },
        },
    }

    client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: nodeName,
            },
            Spec: v1.NodeSpec{
                ConfigSource: &v1.NodeConfigSource{
                    ConfigMapRef: &v1.ObjectReference{
                        UID: "",
                    },
                },
            },
        }, nil
    })
    client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
                Name:      kubeadmconstants.KubeletBaseConfigurationConfigMap,
                Namespace: metav1.NamespaceSystem,
                UID:       "fake-uid",
            },
        }, nil
    })
    client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })

    if err := CreateBaseKubeletConfiguration(cfg, client); err != nil {
        t.Errorf("CreateBaseKubeletConfiguration: unexpected error %v", err)
    }
}

func TestUpdateNodeWithConfigMap(t *testing.T) {
    nodeName := "fake-node"
    client := fake.NewSimpleClientset()
    client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.Node{
            ObjectMeta: metav1.ObjectMeta{
                Name: nodeName,
            },
            Spec: v1.NodeSpec{
                ConfigSource: &v1.NodeConfigSource{
                    ConfigMapRef: &v1.ObjectReference{
                        UID: "",
                    },
                },
            },
        }, nil
    })
    client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
                Name:      kubeadmconstants.KubeletBaseConfigurationConfigMap,
                Namespace: metav1.NamespaceSystem,
                UID:       "fake-uid",
            },
        }, nil
    })
    client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })

    if err := updateNodeWithConfigMap(client, nodeName); err != nil {
        t.Errorf("UpdateNodeWithConfigMap: unexpected error %v", err)
    }
}

func TestCreateKubeletBaseConfigMapRBACRules(t *testing.T) {
    client := fake.NewSimpleClientset()
    client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })
    client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, nil
    })

    if err := createKubeletBaseConfigMapRBACRules(client); err != nil {
        t.Errorf("createKubeletBaseConfigMapRBACRules: unexpected error %v", err)
    }
}
7
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster/BUILD
generated
vendored
7
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster/BUILD
generated
vendored
@ -27,13 +27,8 @@ go_library(
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster",
    deps = [
        "//cmd/kubeadm/app/constants:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
105
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster/markmaster.go
generated
vendored
105
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster/markmaster.go
generated
vendored
@ -17,105 +17,50 @@ limitations under the License.
package markmaster

import (
    "encoding/json"
    "fmt"

    "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

// MarkMaster taints the master and sets the master label
func MarkMaster(client clientset.Interface, masterName string, taint bool) error {
func MarkMaster(client clientset.Interface, masterName string, taints []v1.Taint) error {

    if taint {
        fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
    } else {
        fmt.Printf("[markmaster] Will mark node %s as master by adding a label\n", masterName)
    fmt.Printf("[markmaster] Marking the node %s as master by adding the label \"%s=''\"\n", masterName, constants.LabelNodeRoleMaster)

    if taints != nil && len(taints) > 0 {
        taintStrs := []string{}
        for _, taint := range taints {
            taintStrs = append(taintStrs, taint.ToString())
        }
        fmt.Printf("[markmaster] Marking the node %s as master by adding the taints %v\n", masterName, taintStrs)
    }

    // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
    return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.MarkMasterTimeout, func() (bool, error) {
        // First get the node object
        n, err := client.CoreV1().Nodes().Get(masterName, metav1.GetOptions{})
        if err != nil {
            return false, nil
        }

        // The node may appear to have no labels at first,
        // so we wait for it to get hostname label.
        if _, found := n.ObjectMeta.Labels[kubeletapis.LabelHostname]; !found {
            return false, nil
        }

        oldData, err := json.Marshal(n)
        if err != nil {
            return false, err
        }

        // The master node should be tainted and labelled accordingly
        markMasterNode(n, taint)

        newData, err := json.Marshal(n)
        if err != nil {
            return false, err
        }

        patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
        if err != nil {
            return false, err
        }

        if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
            if apierrs.IsConflict(err) {
                fmt.Println("[markmaster] Temporarily unable to update master node metadata due to conflict (will retry)")
                return false, nil
            }
            return false, err
        }

        if taint {
            fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
        } else {
            fmt.Printf("[markmaster] Master %s labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
        }

        return true, nil
    return apiclient.PatchNode(client, masterName, func(n *v1.Node) {
        markMasterNode(n, taints)
    })
}

func markMasterNode(n *v1.Node, taint bool) {
    n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
    if taint {
        addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
    } else {
        delTaintIfExists(n, kubeadmconstants.MasterTaint)
    }
}

func addTaintIfNotExists(n *v1.Node, t v1.Taint) {
    for _, taint := range n.Spec.Taints {
        if taint == t {
            return
func taintExists(taint v1.Taint, taints []v1.Taint) bool {
    for _, t := range taints {
        if t == taint {
            return true
        }
    }

    n.Spec.Taints = append(n.Spec.Taints, t)
    return false
}

func delTaintIfExists(n *v1.Node, t v1.Taint) {
    var taints []v1.Taint
    for _, taint := range n.Spec.Taints {
        if taint == t {
            continue
func markMasterNode(n *v1.Node, taints []v1.Taint) {
    n.ObjectMeta.Labels[constants.LabelNodeRoleMaster] = ""

    for _, nt := range n.Spec.Taints {
        if !taintExists(nt, taints) {
            taints = append(taints, nt)
        }
        taints = append(taints, t)
    }

    n.Spec.Taints = taints
}
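A sketch (editorial, not part of the diff) of the taint-merge semantics markMasterNode now implements above: wanted taints come first, and existing node taints are kept only when not already in the result, so re-running MarkMaster is idempotent and never duplicates a taint.

package main

import "fmt"

type taint struct{ Key, Value, Effect string }

func taintExists(t taint, taints []taint) bool {
    for _, tt := range taints {
        if tt == t {
            return true
        }
    }
    return false
}

// mergeTaints mirrors the loop in markMasterNode: start from the wanted set,
// then append any existing taint that is not already present.
func mergeTaints(wanted, existing []taint) []taint {
    out := append([]taint{}, wanted...)
    for _, nt := range existing {
        if !taintExists(nt, out) {
            out = append(out, nt)
        }
    }
    return out
}

func main() {
    master := taint{Key: "node-role.kubernetes.io/master", Effect: "NoSchedule"}
    uninit := taint{Key: "node.cloudprovider.kubernetes.io/uninitialized", Effect: "NoSchedule"}
    // The master taint is not duplicated; the pre-existing cloud-provider taint survives.
    fmt.Println(mergeTaints([]taint{master}, []taint{uninit, master}))
}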
56
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster/markmaster_test.go
generated
vendored
56
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster/markmaster_test.go
generated
vendored
@ -40,53 +40,70 @@ func TestMarkMaster(t *testing.T) {
// will need to change if strategicpatch's behavior changes in the
// future.
tests := []struct {
    name          string
    existingLabel string
    existingTaint *v1.Taint
    wantTaint     bool
    expectedPatch string
    name           string
    existingLabel  string
    existingTaints []v1.Taint
    newTaints      []v1.Taint
    expectedPatch  string
}{
    {
        "master label and taint missing",
        "",
        nil,
        true,
        []v1.Taint{kubeadmconstants.MasterTaint},
        "{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
    },
    {
        "master label and taint missing but taint not wanted",
        "",
        nil,
        false,
        nil,
        "{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
    },
    {
        "master label missing",
        "",
        &kubeadmconstants.MasterTaint,
        true,
        []v1.Taint{kubeadmconstants.MasterTaint},
        []v1.Taint{kubeadmconstants.MasterTaint},
        "{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
    },
    {
        "master taint missing",
        kubeadmconstants.LabelNodeRoleMaster,
        nil,
        true,
        []v1.Taint{kubeadmconstants.MasterTaint},
        "{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
    },
    {
        "nothing missing",
        kubeadmconstants.LabelNodeRoleMaster,
        &kubeadmconstants.MasterTaint,
        true,
        []v1.Taint{kubeadmconstants.MasterTaint},
        []v1.Taint{kubeadmconstants.MasterTaint},
        "{}",
    },
    {
        "nothing missing but taint unwanted",
        "has taint and no new taints wanted",
        kubeadmconstants.LabelNodeRoleMaster,
        &kubeadmconstants.MasterTaint,
        false,
        "{\"spec\":{\"taints\":null}}",
        []v1.Taint{
            {
                Key:    "node.cloudprovider.kubernetes.io/uninitialized",
                Effect: v1.TaintEffectNoSchedule,
            },
        },
        nil,
        "{}",
    },
    {
        "has taint and should merge with wanted taint",
        kubeadmconstants.LabelNodeRoleMaster,
        []v1.Taint{
            {
                Key:    "node.cloudprovider.kubernetes.io/uninitialized",
                Effect: v1.TaintEffectNoSchedule,
            },
        },
        []v1.Taint{kubeadmconstants.MasterTaint},
        "{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"},{\"effect\":\"NoSchedule\",\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\"}]}}",
    },
}

@ -105,8 +122,8 @@ func TestMarkMaster(t *testing.T) {
    masterNode.ObjectMeta.Labels[tc.existingLabel] = ""
}

if tc.existingTaint != nil {
    masterNode.Spec.Taints = append(masterNode.Spec.Taints, *tc.existingTaint)
if tc.existingTaints != nil {
    masterNode.Spec.Taints = tc.existingTaints
}

jsonNode, err := json.Marshal(masterNode)
@ -144,8 +161,7 @@ func TestMarkMaster(t *testing.T) {
    t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err)
}

err = MarkMaster(cs, hostname, tc.wantTaint)
if err != nil {
if err := MarkMaster(cs, hostname, tc.newTaints); err != nil {
    t.Errorf("MarkMaster(%s) returned unexpected error: %v", tc.name, err)
}

@ -2,18 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "spec.go",
        "zz_generated.deepcopy.go",
    ],
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd/spec",
    srcs = ["patchnode.go"],
    importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode",
    visibility = ["//visibility:public"],
    deps = [
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
40
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode/patchnode.go
generated
vendored
Normal file
40
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode/patchnode.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package patchnode

import (
    "fmt"

    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

// AnnotateCRISocket annotates the node with the given crisocket
func AnnotateCRISocket(client clientset.Interface, nodeName string, criSocket string) error {

    fmt.Printf("[patchnode] Uploading the CRI Socket information %q to the Node API object %q as an annotation\n", criSocket, nodeName)

    return apiclient.PatchNode(client, nodeName, func(n *v1.Node) {
        annotateNodeWithCRISocket(n, criSocket)
    })
}

func annotateNodeWithCRISocket(n *v1.Node, criSocket string) {
    n.ObjectMeta.Annotations[constants.AnnotationKubeadmCRISocket] = criSocket
}
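A sketch (editorial) of the mutator-callback pattern AnnotateCRISocket and MarkMaster now share: the caller hands apiclient.PatchNode a closure that edits the Node in place, and the helper owns the get/marshal/diff/patch/retry loop. The signature is the one used in this diff; labelNode below is a hypothetical caller-side mutator in the same style.

package patchnodeexample

import (
    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

// labelNode sets one label on a node via the shared PatchNode helper; the
// closure only mutates the object, never talks to the API server itself.
func labelNode(client clientset.Interface, nodeName, key, value string) error {
    return apiclient.PatchNode(client, nodeName, func(n *v1.Node) {
        if n.ObjectMeta.Labels == nil {
            n.ObjectMeta.Labels = map[string]string{}
        }
        n.ObjectMeta.Labels[key] = value
    })
}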
5
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting/BUILD
generated
vendored
5
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting/BUILD
generated
vendored
@ -17,7 +17,6 @@ go_test(
    deps = [
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
    ],
@ -37,11 +36,13 @@ go_library(
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
    ],
)

36
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting/selfhosting.go
generated
vendored
36
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting/selfhosting.go
generated
vendored
@ -18,18 +18,22 @@ package selfhosting

import (
    "fmt"
    "io/ioutil"
    "os"
    "time"

    "github.com/golang/glog"

    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    clientset "k8s.io/client-go/kubernetes"
    clientscheme "k8s.io/client-go/kubernetes/scheme"
    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/features"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
@ -53,11 +57,12 @@ const (
// Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes
// 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter, dryRun bool) error {

    glog.V(1).Infoln("creating self hosted control plane")
    // Adjust the timeout slightly to something self-hosting specific
    waiter.SetTimeout(selfHostingWaitTimeout)

    // Here the map of different mutators to use for the control plane's PodSpec is stored
    glog.V(1).Infoln("getting mutators")
    mutators := GetMutatorsFromFeatureGates(cfg.FeatureGates)

    // Some extra work to be done if we should store the control plane certificates in Secrets
@ -82,12 +87,11 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
            continue
        }

        // Load the Static Pod file in order to be able to create a self-hosted variant of that file
        pod, err := volumeutil.LoadPodFromFile(manifestPath)
        // Load the Static Pod spec in order to be able to create a self-hosted variant of that file
        podSpec, err := loadPodSpecFromFile(manifestPath)
        if err != nil {
            return err
        }
        podSpec := &pod.Spec

        // Build a DaemonSet object from the loaded PodSpec
        ds := BuildDaemonSet(componentName, podSpec, mutators)
@ -114,7 +118,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
        // Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to
        // remove the Static Pod (or the mirror Pod respectively). This implicitly also tests that the API server endpoint is healthy,
        // because this blocks until the API server returns a 404 Not Found when getting the Static Pod
        staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeName)
        staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeRegistration.Name)
        if err := waiter.WaitForPodToDisappear(staticPodName); err != nil {
            return err
        }
@ -171,3 +175,23 @@ func BuildSelfhostedComponentLabels(component string) map[string]string {
func BuildSelfHostedComponentLabelQuery(componentName string) string {
    return fmt.Sprintf("k8s-app=%s", kubeadmconstants.AddSelfHostedPrefix(componentName))
}

func loadPodSpecFromFile(filePath string) (*v1.PodSpec, error) {
    podDef, err := ioutil.ReadFile(filePath)
    if err != nil {
        return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err)
    }

    if len(podDef) == 0 {
        return nil, fmt.Errorf("file was empty: %s", filePath)
    }

    codec := clientscheme.Codecs.UniversalDecoder()
    pod := &v1.Pod{}

    if err = runtime.DecodeInto(codec, podDef, pod); err != nil {
        return nil, fmt.Errorf("failed decoding pod: %v", err)
    }

    return &pod.Spec, nil
}
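A sketch (editorial) of decoding a static-Pod manifest with the client-go scheme codec, as loadPodSpecFromFile does above; this is a minimal in-memory variant so it runs without a file on disk, and it assumes the vendored decoder accepts YAML input the same way the function above does.

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    clientscheme "k8s.io/client-go/kubernetes/scheme"
)

const manifest = `
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
spec:
  containers:
  - name: kube-apiserver
    image: k8s.gcr.io/kube-apiserver
`

func main() {
    // Decode the manifest into a typed Pod, then use only its Spec, exactly
    // as loadPodSpecFromFile does.
    pod := &v1.Pod{}
    if err := runtime.DecodeInto(clientscheme.Codecs.UniversalDecoder(), []byte(manifest), pod); err != nil {
        panic(fmt.Errorf("failed decoding pod: %v", err))
    }
    fmt.Println(pod.Spec.Containers[0].Name) // kube-apiserver
}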
18
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go
generated
vendored
18
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go
generated
vendored
@ -26,7 +26,6 @@ import (
    apps "k8s.io/api/apps/v1"
    "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/util"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
@ -494,11 +493,10 @@ func TestBuildDaemonSet(t *testing.T) {
        }
        defer os.Remove(tempFile)

        pod, err := volumeutil.LoadPodFromFile(tempFile)
        podSpec, err := loadPodSpecFromFile(tempFile)
        if err != nil {
            t.Fatalf("couldn't load the specified Pod")
            t.Fatalf("couldn't load the specified Pod Spec")
        }
        podSpec := &pod.Spec

        ds := BuildDaemonSet(rt.component, podSpec, GetDefaultMutators())
        dsBytes, err := util.MarshalToYaml(ds, apps.SchemeGroupVersion)
@ -517,6 +515,11 @@ func TestLoadPodSpecFromFile(t *testing.T) {
        content     string
        expectError bool
    }{
        {
            // No content
            content:     "",
            expectError: true,
        },
        {
            // Good YAML
            content: `
@ -570,11 +573,16 @@ spec:
        }
        defer os.Remove(tempFile)

        _, err = volumeutil.LoadPodFromFile(tempFile)
        _, err = loadPodSpecFromFile(tempFile)
        if (err != nil) != rt.expectError {
            t.Errorf("failed TestLoadPodSpecFromFile:\nexpected error:\n%t\nsaw:\n%v", rt.expectError, err)
        }
    }

    _, err := loadPodSpecFromFile("")
    if err == nil {
        t.Error("unexpected success: loadPodSpecFromFile should return error when no file is given")
    }
}

func createTempFileWithContent(content []byte) (string, error) {
24
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/BUILD
generated
vendored
24
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/BUILD
generated
vendored
@ -4,11 +4,9 @@ go_library(
    name = "go_default_library",
    srcs = [
        "compute.go",
        "configuration.go",
        "health.go",
        "policy.go",
        "postupgrade.go",
        "postupgrade_v18_19.go",
        "prepull.go",
        "selfhosted.go",
        "staticpods.go",
@ -18,8 +16,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/images:go_default_library",
@ -30,14 +27,15 @@ go_library(
        "//cmd/kubeadm/app/phases/certs:go_default_library",
        "//cmd/kubeadm/app/phases/controlplane:go_default_library",
        "//cmd/kubeadm/app/phases/etcd:go_default_library",
        "//cmd/kubeadm/app/phases/kubelet:go_default_library",
        "//cmd/kubeadm/app/phases/patchnode:go_default_library",
        "//cmd/kubeadm/app/phases/selfhosting:go_default_library",
        "//cmd/kubeadm/app/phases/uploadconfig:go_default_library",
        "//cmd/kubeadm/app/preflight:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//cmd/kubeadm/app/util/config:go_default_library",
        "//cmd/kubeadm/app/util/dryrun:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//cmd/kubeadm/app/util/etcd:go_default_library",
        "//pkg/util/version:go_default_library",
        "//pkg/version:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
@ -45,10 +43,10 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
    ],
)

@ -71,24 +69,30 @@ go_test(
    srcs = [
        "compute_test.go",
        "policy_test.go",
        "postupgrade_v18_19_test.go",
        "postupgrade_test.go",
        "prepull_test.go",
        "staticpods_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
        "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/phases/certs:go_default_library",
        "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
        "//cmd/kubeadm/app/phases/controlplane:go_default_library",
        "//cmd/kubeadm/app/phases/etcd:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//cmd/kubeadm/app/util/etcd:go_default_library",
        "//cmd/kubeadm/test:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/util/version:go_default_library",
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)

61
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/compute.go
generated
vendored
61
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/compute.go
generated
vendored
@ -20,11 +20,12 @@ import (
    "fmt"
    "strings"

    clientset "k8s.io/client-go/kubernetes"
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/cmd/kubeadm/app/features"
    "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
    "k8s.io/kubernetes/cmd/kubeadm/app/util"
    "k8s.io/kubernetes/pkg/util/version"
    etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
    versionutil "k8s.io/kubernetes/pkg/util/version"
)

// Upgrade defines an upgrade possibility to upgrade from a current version to a new one
@ -50,6 +51,11 @@ func (u *Upgrade) CanUpgradeKubelets() bool {
    return !sameVersionFound
}

// CanUpgradeEtcd returns whether an upgrade of etcd is possible
func (u *Upgrade) CanUpgradeEtcd() bool {
    return u.Before.EtcdVersion != u.After.EtcdVersion
}

// ActiveDNSAddon returns the version of CoreDNS or kube-dns
func ActiveDNSAddon(featureGates map[string]bool) string {
    if features.Enabled(featureGates, features.CoreDNS) {
@ -62,6 +68,8 @@ func ActiveDNSAddon(featureGates map[string]bool) string {
type ClusterState struct {
    // KubeVersion describes the version of the Kubernetes API Server, Controller Manager, Scheduler and Proxy.
    KubeVersion string
    // DNSType
    DNSType string
    // DNSVersion describes the version of the kube-dns images used and manifest version
    DNSVersion string
    // KubeadmVersion describes the version of the kubeadm CLI
@ -74,7 +82,7 @@ type ClusterState struct {

// GetAvailableUpgrades fetches all versions from the specified VersionGetter and computes which
// kinds of upgrades can be performed
func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesAllowed, rcUpgradesAllowed bool, cluster util.EtcdCluster, featureGates map[string]bool) ([]Upgrade, error) {
func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesAllowed, rcUpgradesAllowed bool, etcdClient etcdutil.ClusterInterrogator, featureGates map[string]bool, client clientset.Interface) ([]Upgrade, error) {
    fmt.Println("[upgrade] Fetching available versions to upgrade to")

    // Collect the upgrades kubeadm can do in this list
@ -83,13 +91,13 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
    // Get the cluster version
    clusterVersionStr, clusterVersion, err := versionGetterImpl.ClusterVersion()
    if err != nil {
        return nil, err
        return upgrades, err
    }

    // Get current kubeadm CLI version
    kubeadmVersionStr, kubeadmVersion, err := versionGetterImpl.KubeadmVersion()
    if err != nil {
        return nil, err
        return upgrades, err
    }

    // Get and output the current latest stable version
@ -103,11 +111,16 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
    // Get the kubelet versions in the cluster
    kubeletVersions, err := versionGetterImpl.KubeletVersions()
    if err != nil {
        return nil, err
        return upgrades, err
    }

    // Get current etcd version
    etcdStatus, err := cluster.GetEtcdClusterStatus()
    etcdVersion, err := etcdClient.GetVersion()
    if err != nil {
        return upgrades, err
    }

    dnsType, dnsVersion, err := dns.DeployedDNSAddon(client)
    if err != nil {
        return nil, err
    }
@ -115,10 +128,11 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
    // Construct a descriptor for the current state of the world
    beforeState := ClusterState{
        KubeVersion: clusterVersionStr,
        DNSVersion:  dns.GetDNSVersion(clusterVersion, ActiveDNSAddon(featureGates)),
        DNSType:     dnsType,
        DNSVersion:  dnsVersion,
        KubeadmVersion:  kubeadmVersionStr,
        KubeletVersions: kubeletVersions,
        EtcdVersion: etcdStatus.Version,
        EtcdVersion: etcdVersion,
    }

    // Do a "dumb guess" that a new minor upgrade is available just because the latest stable version is higher than the cluster version
@ -158,7 +172,8 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
        Before: beforeState,
        After: ClusterState{
            KubeVersion: patchVersionStr,
            DNSVersion:  dns.GetDNSVersion(patchVersion, ActiveDNSAddon(featureGates)),
            DNSType:     ActiveDNSAddon(featureGates),
            DNSVersion:  kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates)),
            KubeadmVersion: newKubeadmVer,
            EtcdVersion:    getSuggestedEtcdVersion(patchVersionStr),
            // KubeletVersions is unset here as it is not used anywhere in .After
@ -174,7 +189,8 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
        Before: beforeState,
        After: ClusterState{
            KubeVersion: stableVersionStr,
            DNSVersion:  dns.GetDNSVersion(stableVersion, ActiveDNSAddon(featureGates)),
            DNSType:     ActiveDNSAddon(featureGates),
            DNSVersion:  kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates)),
            KubeadmVersion: stableVersionStr,
            EtcdVersion:    getSuggestedEtcdVersion(stableVersionStr),
            // KubeletVersions is unset here as it is not used anywhere in .After
@ -201,7 +217,7 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
    // Get and output the current latest unstable version
    latestVersionStr, latestVersion, err := versionGetterImpl.VersionFromCILabel("latest", "experimental version")
    if err != nil {
        return nil, err
        return upgrades, err
    }

    minorUnstable := latestVersion.Components()[1]
@ -209,7 +225,7 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
    previousBranch := fmt.Sprintf("latest-1.%d", minorUnstable-1)
    previousBranchLatestVersionStr, previousBranchLatestVersion, err := versionGetterImpl.VersionFromCILabel(previousBranch, "")
    if err != nil {
        return nil, err
        return upgrades, err
    }

    // If that previous latest version is an RC, RCs are allowed and the cluster version is lower than the RC version, show the upgrade
@ -219,7 +235,8 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
        Before: beforeState,
        After: ClusterState{
            KubeVersion: previousBranchLatestVersionStr,
            DNSVersion:  dns.GetDNSVersion(previousBranchLatestVersion, ActiveDNSAddon(featureGates)),
            DNSType:     ActiveDNSAddon(featureGates),
            DNSVersion:  kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates)),
            KubeadmVersion: previousBranchLatestVersionStr,
            EtcdVersion:    getSuggestedEtcdVersion(previousBranchLatestVersionStr),
            // KubeletVersions is unset here as it is not used anywhere in .After
@ -232,12 +249,12 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA

    // Default to assume that the experimental version to show is the unstable one
    unstableKubeVersion := latestVersionStr
    unstableKubeDNSVersion := dns.GetDNSVersion(latestVersion, ActiveDNSAddon(featureGates))
    unstableKubeDNSVersion := kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates))

    // We should not display alpha.0. The previous branch's beta/rc versions are more relevant due how the kube branching process works.
    if latestVersion.PreRelease() == "alpha.0" {
        unstableKubeVersion = previousBranchLatestVersionStr
        unstableKubeDNSVersion = dns.GetDNSVersion(previousBranchLatestVersion, ActiveDNSAddon(featureGates))
        unstableKubeDNSVersion = kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates))
    }

    upgrades = append(upgrades, Upgrade{
@ -245,6 +262,7 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
        Before: beforeState,
        After: ClusterState{
            KubeVersion: unstableKubeVersion,
            DNSType:     ActiveDNSAddon(featureGates),
            DNSVersion:  unstableKubeDNSVersion,
            KubeadmVersion: unstableKubeVersion,
            EtcdVersion:    getSuggestedEtcdVersion(unstableKubeVersion),
@ -261,22 +279,23 @@ func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesA
}

func getBranchFromVersion(version string) string {
    return strings.TrimPrefix(version, "v")[:3]
    v := versionutil.MustParseGeneric(version)
    return fmt.Sprintf("%d.%d", v.Major(), v.Minor())
}

func patchVersionBranchExists(clusterVersion, stableVersion *version.Version) bool {
func patchVersionBranchExists(clusterVersion, stableVersion *versionutil.Version) bool {
    return stableVersion.AtLeast(clusterVersion)
}

func patchUpgradePossible(clusterVersion, patchVersion *version.Version) bool {
func patchUpgradePossible(clusterVersion, patchVersion *versionutil.Version) bool {
    return clusterVersion.LessThan(patchVersion)
}

func rcUpgradePossible(clusterVersion, previousBranchLatestVersion *version.Version) bool {
func rcUpgradePossible(clusterVersion, previousBranchLatestVersion *versionutil.Version) bool {
    return strings.HasPrefix(previousBranchLatestVersion.PreRelease(), "rc") && clusterVersion.LessThan(previousBranchLatestVersion)
}

func minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion *version.Version) bool {
func minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion *versionutil.Version) bool {
    return patchVersion.LessThan(stableVersion)
}

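A sketch (editorial) of why getBranchFromVersion switched from string slicing to real version parsing in the hunk above: the old [:3] slice silently truncates two-digit minor versions ("v1.10.3" becomes "1.1"), while MustParseGeneric handles any width.

package main

import (
    "fmt"
    "strings"

    versionutil "k8s.io/kubernetes/pkg/util/version"
)

// oldBranch reproduces the removed implementation.
func oldBranch(v string) string { return strings.TrimPrefix(v, "v")[:3] }

// newBranch reproduces the replacement implementation.
func newBranch(v string) string {
    parsed := versionutil.MustParseGeneric(v)
    return fmt.Sprintf("%d.%d", parsed.Major(), parsed.Minor())
}

func main() {
    for _, v := range []string{"v1.9.3", "v1.10.3"} {
        // For "v1.10.3": oldBranch yields "1.1", newBranch yields "1.10".
        fmt.Println(v, oldBranch(v), newBranch(v))
    }
}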
781
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/compute_test.go
generated
vendored
781
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/compute_test.go
generated
vendored
File diff suppressed because it is too large
124
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/configuration.go
generated
vendored
124
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/configuration.go
generated
vendored
@ -1,124 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrade

import (
"fmt"
"io"
"io/ioutil"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)

// FetchConfiguration fetches configuration required for upgrading your cluster from a file (which has precedence) or a ConfigMap in the cluster
func FetchConfiguration(client clientset.Interface, w io.Writer, cfgPath string) (*kubeadmapiext.MasterConfiguration, error) {
fmt.Println("[upgrade/config] Making sure the configuration is correct:")

// Load the configuration from a file or the cluster
configBytes, err := loadConfigurationBytes(client, w, cfgPath)
if err != nil {
return nil, err
}

// Take the versioned configuration populated from the configmap, default it and validate
// Return the internal version of the API object
versionedcfg, err := bytesToValidatedMasterConfig(configBytes)
if err != nil {
return nil, fmt.Errorf("could not decode configuration: %v", err)
}
return versionedcfg, nil
}

// FetchConfigurationFromFile fetch configuration from a file
func FetchConfigurationFromFile(cfgPath string) (*kubeadmapiext.MasterConfiguration, error) {
// Load the configuration from a file or the cluster
configBytes, err := ioutil.ReadFile(cfgPath)
if err != nil {
return nil, err
}

// Take the versioned configuration populated from the configmap, default it and validate
// Return the internal version of the API object
versionedcfg, err := bytesToValidatedMasterConfig(configBytes)
if err != nil {
return nil, fmt.Errorf("could not decode configuration: %v", err)
}
return versionedcfg, nil
}

// loadConfigurationBytes loads the configuration byte slice from either a file or the cluster ConfigMap
func loadConfigurationBytes(client clientset.Interface, w io.Writer, cfgPath string) ([]byte, error) {
if cfgPath != "" {
fmt.Printf("[upgrade/config] Reading configuration options from a file: %s\n", cfgPath)
return ioutil.ReadFile(cfgPath)
}

fmt.Println("[upgrade/config] Reading configuration from the cluster...")

configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.MasterConfigurationConfigMap, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem)
fmt.Println("[upgrade/config] Without this information, 'kubeadm upgrade' won't know how to configure your upgraded cluster.")
fmt.Println("")
fmt.Println("[upgrade/config] Next steps:")
fmt.Printf("\t- OPTION 1: Run 'kubeadm config upload from-flags' and specify the same CLI arguments you passed to 'kubeadm init' when you created your master.\n")
fmt.Printf("\t- OPTION 2: Run 'kubeadm config upload from-file' and specify the same config file you passed to 'kubeadm init' when you created your master.\n")
fmt.Printf("\t- OPTION 3: Pass a config file to 'kubeadm upgrade' using the --config flag.\n")
fmt.Println("")
return []byte{}, fmt.Errorf("the ConfigMap %q in the %s namespace used for getting configuration information was not found", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem)
} else if err != nil {
return []byte{}, fmt.Errorf("an unexpected error happened when trying to get the ConfigMap %q in the %s namespace: %v", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem, err)
}

fmt.Printf("[upgrade/config] FYI: You can look at this config file with 'kubectl -n %s get cm %s -oyaml'\n", metav1.NamespaceSystem, constants.MasterConfigurationConfigMap)
return []byte(configMap.Data[constants.MasterConfigurationConfigMapKey]), nil
}

// bytesToValidatedMasterConfig converts a byte array to an external, defaulted and validated configuration object
func bytesToValidatedMasterConfig(b []byte) (*kubeadmapiext.MasterConfiguration, error) {
cfg := &kubeadmapiext.MasterConfiguration{}
finalCfg := &kubeadmapiext.MasterConfiguration{}
internalcfg := &kubeadmapi.MasterConfiguration{}

if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), b, cfg); err != nil {
return nil, fmt.Errorf("unable to decode config from bytes: %v", err)
}
// Default and convert to the internal version
legacyscheme.Scheme.Default(cfg)
legacyscheme.Scheme.Convert(cfg, internalcfg, nil)

// Applies dynamic defaults to settings not provided with flags
if err := configutil.SetInitDynamicDefaults(internalcfg); err != nil {
return nil, err
}
// Validates cfg (flags/configs + defaults + dynamic defaults)
if err := validation.ValidateMasterConfiguration(internalcfg).ToAggregate(); err != nil {
return nil, err
}
// Finally converts back to the external version
legacyscheme.Scheme.Convert(internalcfg, finalCfg, nil)
return finalCfg, nil
}
5
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/policy.go
generated
vendored
@ -116,6 +116,11 @@ func EnforceVersionPolicies(versionGetter VersionGetter, newK8sVersionStr string
}
}

if kubeadmVersion.Major() > newK8sVersion.Major() ||
kubeadmVersion.Minor() > newK8sVersion.Minor() {
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Kubeadm version %s can only be used to upgrade to Kubernetes version %d.%d", kubeadmVersionStr, kubeadmVersion.Major(), kubeadmVersion.Minor()))
}

// Detect if the version is unstable and the user didn't allow that
if err = detectUnstableVersionError(newK8sVersion, newK8sVersionStr, allowExperimentalUpgrades, allowRCUpgrades); err != nil {
skewErrors.Skippable = append(skewErrors.Skippable, err)
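To make the added skew check concrete, here is a small illustrative sketch (not code from the diff; it assumes the same versionutil semantics as above) of the case it guards against, matching the new "newer minor version of kubeadm" test below.

package main

import (
	"fmt"

	versionutil "k8s.io/kubernetes/pkg/util/version"
)

func main() {
	kubeadmVersion := versionutil.MustParseSemantic("v1.11.0")
	newK8sVersion := versionutil.MustParseSemantic("v1.10.6")

	// Mirrors the added condition: kubeadm v1.11 asked to upgrade a
	// cluster only to v1.10.6 trips the (skippable) policy error.
	if kubeadmVersion.Major() > newK8sVersion.Major() ||
		kubeadmVersion.Minor() > newK8sVersion.Minor() {
		fmt.Println("skippable: kubeadm is a newer minor than the upgrade target")
	}
}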
220
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/policy_test.go
generated
vendored
@ -24,169 +24,199 @@ import (

func TestEnforceVersionPolicies(t *testing.T) {
tests := []struct {
name string
vg *fakeVersionGetter
expectedMandatoryErrs int
expectedSkippableErrs int
allowExperimental, allowRCs bool
newK8sVersion string
}{
{ // everything ok
{
name: "minor upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.5",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.5",
},
newK8sVersion: "v1.9.5",
newK8sVersion: "v1.10.5",
},
{ // everything ok
{
name: "major upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.2",
kubeadmVersion: "v1.10.1",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.2",
kubeadmVersion: "v1.11.1",
},
newK8sVersion: "v1.10.0",
newK8sVersion: "v1.11.0",
},
{ // downgrades ok
{
name: "downgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.9.2",
newK8sVersion: "v1.10.2",
},
{ // upgrades without bumping the version number ok
{
name: "same version upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.9.3",
newK8sVersion: "v1.10.3",
},
{ // new version must be higher than v1.9.0
{
name: "new version must be higher than v1.10.0",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.8.10",
expectedMandatoryErrs: 1, // version must be higher than v1.9.0
newK8sVersion: "v1.9.10",
expectedMandatoryErrs: 1, // version must be higher than v1.10.0
expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm
},
{ // upgrading two minor versions in one go is not supported
{
name: "upgrading two minor versions in one go is not supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.11.0",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.12.0",
},
newK8sVersion: "v1.11.0",
newK8sVersion: "v1.12.0",
expectedMandatoryErrs: 1, // can't upgrade two minor versions
expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
},
{ // downgrading two minor versions in one go is not supported
{
name: "downgrading two minor versions in one go is not supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.11.3",
kubeletVersion: "v1.11.3",
kubeadmVersion: "v1.11.0",
clusterVersion: "v1.12.3",
kubeletVersion: "v1.12.3",
kubeadmVersion: "v1.12.0",
},
newK8sVersion: "v1.9.3",
newK8sVersion: "v1.10.3",
expectedMandatoryErrs: 1, // can't downgrade two minor versions
expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm
},
{ // kubeadm version must be higher than the new kube version. However, patch version skews may be forced
{
name: "kubeadm version must be higher than the new kube version. However, patch version skews may be forced",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.9.5",
newK8sVersion: "v1.10.5",
expectedSkippableErrs: 1,
},
{ // kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported
{
name: "kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.9.3",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.10.0",
newK8sVersion: "v1.11.0",
expectedMandatoryErrs: 1,
},
{ // the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.
{
name: "the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.8.8",
kubeadmVersion: "v1.10.0",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.9.8",
kubeadmVersion: "v1.11.0",
},
newK8sVersion: "v1.10.0",
newK8sVersion: "v1.11.0",
expectedSkippableErrs: 1,
},
{ // experimental upgrades supported if the flag is set
{
name: "experimental upgrades supported if the flag is set",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-beta.1",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-beta.1",
},
newK8sVersion: "v1.10.0-beta.1",
newK8sVersion: "v1.11.0-beta.1",
allowExperimental: true,
},
{ // release candidate upgrades supported if the flag is set
{
name: "release candidate upgrades supported if the flag is set",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-rc.1",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-rc.1",
},
newK8sVersion: "v1.10.0-rc.1",
newK8sVersion: "v1.11.0-rc.1",
allowRCs: true,
},
{ // release candidate upgrades supported if the flag is set
{
name: "release candidate upgrades supported if the flag is set",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-rc.1",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-rc.1",
},
newK8sVersion: "v1.10.0-rc.1",
newK8sVersion: "v1.11.0-rc.1",
allowExperimental: true,
},
{ // the user should not be able to upgrade to an experimental version if they haven't opted into that
{
name: "the user should not be able to upgrade to an experimental version if they haven't opted into that",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-beta.1",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-beta.1",
},
newK8sVersion: "v1.10.0-beta.1",
newK8sVersion: "v1.11.0-beta.1",
allowRCs: true,
expectedSkippableErrs: 1,
},
{ // the user should not be able to upgrade to an release candidate version if they haven't opted into that
{
name: "the user should not be able to upgrade to an release candidate version if they haven't opted into that",
vg: &fakeVersionGetter{
clusterVersion: "v1.9.3",
kubeletVersion: "v1.9.3",
kubeadmVersion: "v1.10.0-rc.1",
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-rc.1",
},
newK8sVersion: "v1.10.0-rc.1",
newK8sVersion: "v1.11.0-rc.1",
expectedSkippableErrs: 1,
},
{
name: "the user can't use a newer minor version of kubeadm to upgrade an older version of kubeadm",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0",
},
newK8sVersion: "v1.10.6",
expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm
},
}

for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {

newK8sVer, err := version.ParseSemantic(rt.newK8sVersion)
if err != nil {
t.Fatalf("couldn't parse version %s: %v", rt.newK8sVersion, err)
}

actualSkewErrs := EnforceVersionPolicies(rt.vg, rt.newK8sVersion, newK8sVer, rt.allowExperimental, rt.allowRCs)
if actualSkewErrs == nil {
// No errors were seen. Report unit test failure if we expected to see errors
if rt.expectedMandatoryErrs+rt.expectedSkippableErrs > 0 {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected errors but got none")
newK8sVer, err := version.ParseSemantic(rt.newK8sVersion)
if err != nil {
t.Fatalf("couldn't parse version %s: %v", rt.newK8sVersion, err)
}
// Otherwise, just move on with the next test
continue
}

if len(actualSkewErrs.Skippable) != rt.expectedSkippableErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected skippable errors: %d\n\tgot skippable errors: %d %v", rt.expectedSkippableErrs, len(actualSkewErrs.Skippable), *rt.vg)
}
if len(actualSkewErrs.Mandatory) != rt.expectedMandatoryErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected mandatory errors: %d\n\tgot mandatory errors: %d %v", rt.expectedMandatoryErrs, len(actualSkewErrs.Mandatory), *rt.vg)
}
actualSkewErrs := EnforceVersionPolicies(rt.vg, rt.newK8sVersion, newK8sVer, rt.allowExperimental, rt.allowRCs)
if actualSkewErrs == nil {
// No errors were seen. Report unit test failure if we expected to see errors
if rt.expectedMandatoryErrs+rt.expectedSkippableErrs > 0 {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected errors but got none")
}
// Otherwise, just move on with the next test
return
}

if len(actualSkewErrs.Skippable) != rt.expectedSkippableErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected skippable errors: %d\n\tgot skippable errors: %d %v", rt.expectedSkippableErrs, len(actualSkewErrs.Skippable), *rt.vg)
}
if len(actualSkewErrs.Mandatory) != rt.expectedMandatoryErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected mandatory errors: %d\n\tgot mandatory errors: %d %v", rt.expectedMandatoryErrs, len(actualSkewErrs.Mandatory), *rt.vg)
}
})
}
}
224
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/postupgrade.go
generated
vendored
@ -18,15 +18,18 @@ package upgrade

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
@ -34,6 +37,8 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet"
patchnodephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
@ -41,6 +46,8 @@ import (
"k8s.io/kubernetes/pkg/util/version"
)

var expiry = 180 * 24 * time.Hour

// PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
// Note that the markmaster phase is left out, not needed, and no token is created as that doesn't belong to the upgrade
func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
@ -53,6 +60,23 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
errs = append(errs, err)
}

// Create the new, version-branched kubelet ComponentConfig ConfigMap
if err := kubeletphase.CreateConfigMap(cfg, client); err != nil {
errs = append(errs, fmt.Errorf("error creating kubelet configuration ConfigMap: %v", err))
}

// Write the new kubelet config down to disk and the env file if needed
if err := writeKubeletConfigFiles(client, cfg, newK8sVer, dryRun); err != nil {
errs = append(errs, err)
}

// Annotate the node with the crisocket information, sourced either from the MasterConfiguration struct or
// --cri-socket.
// TODO: In the future we want to use something more official like NodeStatus or similar for detecting this properly
if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil {
errs = append(errs, fmt.Errorf("error uploading crisocket: %v", err))
}

// Create/update RBAC rules that make the bootstrap tokens able to post CSRs
if err := nodebootstraptoken.AllowBootstrapTokensToPostCSRs(client); err != nil {
errs = append(errs, err)
@ -69,7 +93,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
}

// Upgrade to a self-hosted control plane if possible
if err := upgradeToSelfHosting(client, cfg, newK8sVer, dryRun); err != nil {
if err := upgradeToSelfHosting(client, cfg, dryRun); err != nil {
errs = append(errs, err)
}

@ -83,30 +107,18 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
errs = append(errs, err)
}

certAndKeyDir := kubeadmapiext.DefaultCertificatesDir
shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir, newK8sVer)
// Don't fail the upgrade phase if failing to determine to backup kube-apiserver cert and key.
if err != nil {
fmt.Printf("[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key: %v", err)
} else if shouldBackup {
// Don't fail the upgrade phase if failing to backup kube-apiserver cert and key.
if err := backupAPIServerCertAndKey(certAndKeyDir); err != nil {
fmt.Printf("[postupgrade] WARNING: failed to backup kube-apiserver cert and key: %v", err)
}
if err := certsphase.CreateAPIServerCertAndKeyFiles(cfg); err != nil {
errs = append(errs, err)
}
// Rotate the kube-apiserver cert and key if needed
if err := backupAPIServerCertIfNeeded(cfg, dryRun); err != nil {
errs = append(errs, err)
}

// Upgrade kube-dns and kube-proxy
// Upgrade kube-dns/CoreDNS and kube-proxy
if err := dns.EnsureDNSAddon(cfg, client); err != nil {
errs = append(errs, err)
}
// Remove the old kube-dns deployment if coredns is now used
if !dryRun {
if err := removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg, client); err != nil {
errs = append(errs, err)
}
// Remove the old DNS deployment if a new DNS service is now used (kube-dns to CoreDNS or vice versa)
if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg, client, dryRun); err != nil {
errs = append(errs, err)
}

if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
@ -115,28 +127,39 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
return errors.NewAggregate(errs)
}

func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
return apiclient.TryRunCommand(func() error {
coreDNSDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, dryRun bool) error {
return apiclient.TryRunCommand(func() error {
installedDeploymentName := kubeadmconstants.KubeDNS
deploymentToDelete := kubeadmconstants.CoreDNS

if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
installedDeploymentName = kubeadmconstants.CoreDNS
deploymentToDelete = kubeadmconstants.KubeDNS
}

// If we're dry-running, we don't need to wait for the new DNS addon to become ready
if !dryRun {
dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(installedDeploymentName, metav1.GetOptions{})
if err != nil {
return err
}
if coreDNSDeployment.Status.ReadyReplicas == 0 {
return fmt.Errorf("the CoreDNS deployment isn't ready yet")
if dnsDeployment.Status.ReadyReplicas == 0 {
return fmt.Errorf("the DNS deployment isn't ready yet")
}
err = apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}, 10)
}
return nil
}

// We don't want to wait for the DNS deployment above to become ready when dryrunning (as it never will)
// but here we should execute the DELETE command against the dryrun clientset, as it will only be logged
err := apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, deploymentToDelete)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}, 10)
}

func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
if features.Enabled(cfg.FeatureGates, features.SelfHosting) && !IsControlPlaneSelfHosted(client) && newK8sVer.AtLeast(v190alpha3) {
func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, dryRun bool) error {
if features.Enabled(cfg.FeatureGates, features.SelfHosting) && !IsControlPlaneSelfHosted(client) {

waiter := getWaiter(dryRun, client)

@ -149,6 +172,69 @@ func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConf
return nil
}

func backupAPIServerCertIfNeeded(cfg *kubeadmapi.MasterConfiguration, dryRun bool) error {
certAndKeyDir := kubeadmapiv1alpha2.DefaultCertificatesDir
shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir)
if err != nil {
// Don't fail the upgrade phase if failing to determine to backup kube-apiserver cert and key.
return fmt.Errorf("[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key: %v", err)
}

if !shouldBackup {
return nil
}

// If dry-running, just say that this would happen to the user and exit
if dryRun {
fmt.Println("[postupgrade] Would rotate the API server certificate and key.")
return nil
}

// Don't fail the upgrade phase if failing to backup kube-apiserver cert and key, just continue rotating the cert
// TODO: We might want to reconsider this choice.
if err := backupAPIServerCertAndKey(certAndKeyDir); err != nil {
fmt.Printf("[postupgrade] WARNING: failed to backup kube-apiserver cert and key: %v", err)
}
return certsphase.CreateAPIServerCertAndKeyFiles(cfg)
}

func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
kubeletDir, err := getKubeletDir(dryRun)
if err != nil {
// The error here should never occur in reality, would only be thrown if /tmp doesn't exist on the machine.
return err
}
errs := []error{}
// Write the configuration for the kubelet down to disk so the upgraded kubelet can start with fresh config
if err := kubeletphase.DownloadConfig(client, newK8sVer, kubeletDir); err != nil {
// Tolerate the error being NotFound when dryrunning, as there is a pretty common scenario: the dryrun process
// *would* post the new kubelet-config-1.X configmap that doesn't exist now when we're trying to download it
// again.
if !(apierrors.IsNotFound(err) && dryRun) {
errs = append(errs, fmt.Errorf("error downloading kubelet configuration from the ConfigMap: %v", err))
}
}

if dryRun { // Print what contents would be written
dryrunutil.PrintDryRunFile(kubeadmconstants.KubeletConfigurationFileName, kubeletDir, kubeadmconstants.KubeletRunDirectory, os.Stdout)
}

envFilePath := filepath.Join(kubeadmconstants.KubeletRunDirectory, kubeadmconstants.KubeletEnvFileName)
if _, err := os.Stat(envFilePath); os.IsNotExist(err) {
// Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
// as we handle that ourselves in the markmaster phase
// TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
if err := kubeletphase.WriteKubeletDynamicEnvFile(&cfg.NodeRegistration, cfg.FeatureGates, false, kubeletDir); err != nil {
errs = append(errs, fmt.Errorf("error writing a dynamic environment file for the kubelet: %v", err))
}

if dryRun { // Print what contents would be written
dryrunutil.PrintDryRunFile(kubeadmconstants.KubeletEnvFileName, kubeletDir, kubeadmconstants.KubeletRunDirectory, os.Stdout)
}
}
return errors.NewAggregate(errs)
}

// getWaiter gets the right waiter implementation for the right occasion
// TODO: Consolidate this with what's in init.go?
func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
@ -157,3 +243,67 @@ func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
}
return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
}

// getKubeletDir gets the kubelet directory based on whether the user is dry-running this command or not.
// TODO: Consolidate this with similar funcs?
func getKubeletDir(dryRun bool) (string, error) {
if dryRun {
return ioutil.TempDir("", "kubeadm-upgrade-dryrun")
}
return kubeadmconstants.KubeletRunDirectory, nil
}

// backupAPIServerCertAndKey backs up the old cert and key of kube-apiserver to a specified directory.
func backupAPIServerCertAndKey(certAndKeyDir string) error {
subDir := filepath.Join(certAndKeyDir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
return fmt.Errorf("failed to create backup directory %s: %v", subDir, err)
}

filesToMove := map[string]string{
filepath.Join(certAndKeyDir, kubeadmconstants.APIServerCertName): filepath.Join(subDir, kubeadmconstants.APIServerCertName),
filepath.Join(certAndKeyDir, kubeadmconstants.APIServerKeyName): filepath.Join(subDir, kubeadmconstants.APIServerKeyName),
}
return moveFiles(filesToMove)
}

// moveFiles moves files from one directory to another.
func moveFiles(files map[string]string) error {
filesToRecover := map[string]string{}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
return rollbackFiles(filesToRecover, err)
}
filesToRecover[to] = from
}
return nil
}

// rollbackFiles moves the files back to the original directory.
func rollbackFiles(files map[string]string, originalErr error) error {
errs := []error{originalErr}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
errs = append(errs, err)
}
}
return fmt.Errorf("couldn't move these files: %v. Got errors: %v", files, errors.NewAggregate(errs))
}

// shouldBackupAPIServerCertAndKey checks if the cert of kube-apiserver will be expired in 180 days.
func shouldBackupAPIServerCertAndKey(certAndKeyDir string) (bool, error) {
apiServerCert := filepath.Join(certAndKeyDir, kubeadmconstants.APIServerCertName)
certs, err := certutil.CertsFromFile(apiServerCert)
if err != nil {
return false, fmt.Errorf("couldn't load the certificate file %s: %v", apiServerCert, err)
}
if len(certs) == 0 {
return false, fmt.Errorf("no certificate data found")
}

if time.Now().Sub(certs[0].NotBefore) > expiry {
return true, nil
}

return false, nil
}
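Worth noting about shouldBackupAPIServerCertAndKey above: despite the variable name expiry, the predicate keys off the certificate's NotBefore (issue) timestamp rather than NotAfter. A self-contained illustrative sketch of that check (stdlib only; not code from the diff):

package main

import (
	"fmt"
	"time"
)

// shouldRotate reports whether a cert issued at notBefore is more than
// 180 days old -- the same test the postupgrade code applies to the
// kube-apiserver certificate before backing it up and re-creating it.
func shouldRotate(notBefore, now time.Time) bool {
	const expiry = 180 * 24 * time.Hour
	return now.Sub(notBefore) > expiry
}

func main() {
	issued := time.Now().Add(-200 * 24 * time.Hour)
	fmt.Println(shouldRotate(issued, time.Now())) // true: issued over 180 days ago
}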
@ -29,7 +29,6 @@ import (
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
"k8s.io/kubernetes/pkg/util/version"
)

func TestBackupAPIServerCertAndKey(t *testing.T) {
@ -132,27 +131,20 @@ func TestRollbackFiles(t *testing.T) {

func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeName: "test-node",
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "test-node"},
}

for desc, test := range map[string]struct {
adjustedExpiry time.Duration
k8sVersion *version.Version
expected bool
}{
"1.8 version doesn't need to backup": {
k8sVersion: version.MustParseSemantic("v1.8.0"),
expected: false,
"default: cert not older than 180 days doesn't needs to backup": {
expected: false,
},
"1.9 version with cert not older than 180 days doesn't needs to backup": {
k8sVersion: version.MustParseSemantic("v1.9.0"),
expected: false,
},
"1.9 version with cert older than 180 days need to backup": {
"cert older than 180 days need to backup": {
adjustedExpiry: expiry + 100*time.Hour,
k8sVersion: version.MustParseSemantic("v1.9.0"),
expected: true,
},
} {
@ -180,7 +172,7 @@ func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
}
}

shouldBackup, err := shouldBackupAPIServerCertAndKey(tmpdir, test.k8sVersion)
shouldBackup, err := shouldBackupAPIServerCertAndKey(tmpdir)
if err != nil {
t.Fatalf("Test %s: failed to check shouldBackupAPIServerCertAndKey: %v", desc, err)
}
106
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/postupgrade_v18_19.go
generated
vendored
@ -1,106 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrade

import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"

"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version"
)

// TODO: Maybe move these constants elsewhere in future releases
var v190 = version.MustParseSemantic("v1.9.0")
var v190alpha3 = version.MustParseSemantic("v1.9.0-alpha.3")
var expiry = 180 * 24 * time.Hour

// backupAPIServerCertAndKey backups the old cert and key of kube-apiserver to a specified directory.
func backupAPIServerCertAndKey(certAndKeyDir string) error {
subDir := filepath.Join(certAndKeyDir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
return fmt.Errorf("failed to created backup directory %s: %v", subDir, err)
}

filesToMove := map[string]string{
filepath.Join(certAndKeyDir, constants.APIServerCertName): filepath.Join(subDir, constants.APIServerCertName),
filepath.Join(certAndKeyDir, constants.APIServerKeyName): filepath.Join(subDir, constants.APIServerKeyName),
}
return moveFiles(filesToMove)
}

// moveFiles moves files from one directory to another.
func moveFiles(files map[string]string) error {
filesToRecover := map[string]string{}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
return rollbackFiles(filesToRecover, err)
}
filesToRecover[to] = from
}
return nil
}

// rollbackFiles moves the files back to the original directory.
func rollbackFiles(files map[string]string, originalErr error) error {
errs := []error{originalErr}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
errs = append(errs, err)
}
}
return fmt.Errorf("couldn't move these files: %v. Got errors: %v", files, errors.NewAggregate(errs))
}

// shouldBackupAPIServerCertAndKey check if the new k8s version is at least 1.9.0
// and kube-apiserver will be expired in 60 days.
func shouldBackupAPIServerCertAndKey(certAndKeyDir string, newK8sVer *version.Version) (bool, error) {
if newK8sVer.LessThan(v190) {
return false, nil
}

apiServerCert := filepath.Join(certAndKeyDir, constants.APIServerCertName)
data, err := ioutil.ReadFile(apiServerCert)
if err != nil {
return false, fmt.Errorf("failed to read kube-apiserver certificate from disk: %v", err)
}

block, _ := pem.Decode(data)
if block == nil {
return false, fmt.Errorf("expected the kube-apiserver certificate to be PEM encoded")
}

certs, err := x509.ParseCertificates(block.Bytes)
if err != nil {
return false, fmt.Errorf("unable to parse certificate data: %v", err)
}
if len(certs) == 0 {
return false, fmt.Errorf("no certificate data found")
}

if time.Now().Sub(certs[0].NotBefore) > expiry {
return true, nil
}

return false, nil
}
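For reference, the manual PEM-decode/x509-parse sequence in the deleted helper above is what certutil.CertsFromFile wraps in the replacement code. A minimal stdlib sketch of that flow (the path below is only an example):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
)

// firstCert reads a PEM file and returns the first certificate in it,
// mirroring the steps the deleted shouldBackupAPIServerCertAndKey took.
func firstCert(path string) (*x509.Certificate, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("expected a PEM-encoded certificate")
	}
	certs, err := x509.ParseCertificates(block.Bytes)
	if err != nil {
		return nil, err
	}
	if len(certs) == 0 {
		return nil, fmt.Errorf("no certificate data found")
	}
	return certs[0], nil
}

func main() {
	cert, err := firstCert("/etc/kubernetes/pki/apiserver.crt") // example path
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("issued:", cert.NotBefore)
}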
2
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/prepull.go
generated
vendored
@ -87,7 +87,7 @@ func (d *DaemonSetPrepuller) DeleteFunc(component string) error {

// PrepullImagesInParallel creates DaemonSets synchronously but waits in parallel for the images to pull
func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) error {
componentsToPrepull := constants.MasterComponents
componentsToPrepull := append(constants.MasterComponents, constants.Etcd)
fmt.Printf("[upgrade/prepull] Will prepull images for components %v\n", componentsToPrepull)

timeoutChan := time.After(timeout)
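The timeoutChan := time.After(timeout) line above anchors the create-synchronously, wait-in-parallel pattern that PrepullImagesInParallel builds on. A generic sketch of that pattern (stdlib only; prepullDone is a hypothetical stand-in for the real waiter, not a kubeadm API):

package main

import (
	"fmt"
	"time"
)

// waitAll fans out one goroutine per component and fails if they don't
// all report back before the shared timeout fires. Goroutines that are
// still polling when the timeout hits are simply abandoned, which is
// acceptable for a short-lived CLI process like this sketch.
func waitAll(components []string, timeout time.Duration, prepullDone func(string) bool) error {
	results := make(chan string, len(components))
	for _, c := range components {
		go func(c string) {
			for !prepullDone(c) {
				time.Sleep(time.Second)
			}
			results <- c
		}(c)
	}
	timeoutChan := time.After(timeout)
	for i := 0; i < len(components); i++ {
		select {
		case c := <-results:
			fmt.Printf("[upgrade/prepull] Prepulled image for component %s\n", c)
		case <-timeoutChan:
			return fmt.Errorf("timed out waiting for images to be prepulled")
		}
	}
	return nil
}

func main() {
	err := waitAll([]string{"kube-apiserver", "etcd"}, 10*time.Second, func(string) bool { return true })
	fmt.Println(err)
}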
316
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/staticpods.go
generated
vendored
@ -20,6 +20,7 @@ import (
"fmt"
"os"
"strings"
"time"

kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -28,6 +29,7 @@ import (
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
"k8s.io/kubernetes/pkg/util/version"
)

@ -49,6 +51,8 @@ type StaticPodPathManager interface {
BackupManifestDir() string
// BackupEtcdDir should point to the backup directory used for backing up manifests during the transition
BackupEtcdDir() string
// CleanupDirs cleans up all temporary directories
CleanupDirs() error
}

// KubeStaticPodPathManager is a real implementation of StaticPodPathManager that is used when upgrading a static pod cluster
@ -57,34 +61,39 @@ type KubeStaticPodPathManager struct {
tempManifestDir string
backupManifestDir string
backupEtcdDir string

keepManifestDir bool
keepEtcdDir bool
}

// NewKubeStaticPodPathManager creates a new instance of KubeStaticPodPathManager
func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string) StaticPodPathManager {
func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string, keepManifestDir, keepEtcdDir bool) StaticPodPathManager {
return &KubeStaticPodPathManager{
realManifestDir: realDir,
tempManifestDir: tempDir,
backupManifestDir: backupDir,
backupEtcdDir: backupEtcdDir,
keepManifestDir: keepManifestDir,
keepEtcdDir: keepEtcdDir,
}
}

// NewKubeStaticPodPathManagerUsingTempDirs creates a new instance of KubeStaticPodPathManager with temporary directories backing it
func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string) (StaticPodPathManager, error) {
func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string, saveManifestsDir, saveEtcdDir bool) (StaticPodPathManager, error) {
upgradedManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgraded-manifests")
if err != nil {
return nil, err
}
backupManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-backup-manifests")
backupManifestsDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-manifests")
if err != nil {
return nil, err
}
backupEtcdDir, err := constants.CreateTempDirForKubeadm("kubeadm-backup-etcd")
backupEtcdDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-etcd")
if err != nil {
return nil, err
}

return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir), nil
return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir, saveManifestsDir, saveEtcdDir), nil
}

// MoveFile should move a file from oldPath to newPath
@ -127,26 +136,70 @@ func (spm *KubeStaticPodPathManager) BackupEtcdDir() string {
return spm.backupEtcdDir
}

func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, beforePodHash string, recoverManifests map[string]string) error {
// CleanupDirs cleans up all temporary directories except those the user has requested to keep around
func (spm *KubeStaticPodPathManager) CleanupDirs() error {
if err := os.RemoveAll(spm.TempManifestDir()); err != nil {
return err
}
if !spm.keepManifestDir {
if err := os.RemoveAll(spm.BackupManifestDir()); err != nil {
return err
}
}

if !spm.keepEtcdDir {
if err := os.RemoveAll(spm.BackupEtcdDir()); err != nil {
return err
}
}

return nil
}

func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, beforePodHash string, recoverManifests map[string]string, isTLSUpgrade bool) error {
// Special treatment is required for etcd case, when rollbackOldManifests should roll back etcd
// manifests only for the case when component is Etcd
recoverEtcd := false
waitForComponentRestart := true
if component == constants.Etcd {
recoverEtcd = true
}
if isTLSUpgrade {
// We currently depend on getting the Etcd mirror Pod hash from the KubeAPIServer;
// Upgrading the Etcd protocol takes down the apiserver, so we can't verify component restarts if we restart Etcd independently.
// Skip waiting for Etcd to restart and immediately move on to updating the apiserver.
if component == constants.Etcd {
waitForComponentRestart = false
}
// Normally, if an Etcd upgrade is successful, but the apiserver upgrade fails, Etcd is not rolled back.
// In the case of a TLS upgrade, the old KubeAPIServer config is incompatible with the new Etcd config, so we roll back Etcd
// if the APIServer upgrade fails.
if component == constants.KubeAPIServer {
recoverEtcd = true
fmt.Printf("[upgrade/staticpods] The %s manifest will be restored if component %q fails to upgrade\n", constants.Etcd, component)
}
}

// ensure etcd certs are generated for etcd and kube-apiserver
if component == constants.Etcd || component == constants.KubeAPIServer {
if err := certsphase.CreateEtcdCACertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s CA certificate and key: %v", constants.Etcd, err)
}
}
if component == constants.Etcd {
if err := certsphase.CreateEtcdServerCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s certificate: %v", constants.Etcd, err)
return fmt.Errorf("failed to upgrade the %s certificate and key: %v", constants.Etcd, err)
}
if err := certsphase.CreateEtcdPeerCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s peer certificate: %v", constants.Etcd, err)
return fmt.Errorf("failed to upgrade the %s peer certificate and key: %v", constants.Etcd, err)
}
if err := certsphase.CreateEtcdHealthcheckClientCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s healthcheck certificate and key: %v", constants.Etcd, err)
}
}
if component == constants.KubeAPIServer {
if err := certsphase.CreateAPIServerEtcdClientCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s %s-client certificate: %v", constants.KubeAPIServer, constants.Etcd, err)
return fmt.Errorf("failed to upgrade the %s %s-client certificate and key: %v", constants.KubeAPIServer, constants.Etcd, err)
}
}

@ -172,41 +225,47 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP
}

fmt.Printf("[upgrade/staticpods] Moved new manifest to %q and backed up old manifest to %q\n", currentManifestPath, backupManifestPath)
fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component")

// Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy
// If we don't do this, there is a case where we remove the Static Pod manifest, kubelet is slow to react, kubeadm checks the
// API endpoint below of the OLD Static Pod component and proceeds quickly enough, which might lead to unexpected results.
if err := waiter.WaitForStaticPodControlPlaneHashChange(cfg.NodeName, component, beforePodHash); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
if waitForComponentRestart {
fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component")

// Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy
// If we don't do this, there is a case where we remove the Static Pod manifest, kubelet is slow to react, kubeadm checks the
// API endpoint below of the OLD Static Pod component and proceeds quickly enough, which might lead to unexpected results.
if err := waiter.WaitForStaticPodHashChange(cfg.NodeRegistration.Name, component, beforePodHash); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}

// Wait for the static pod component to come up and register itself as a mirror pod
if err := waiter.WaitForPodsWithLabel("component=" + component); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}

fmt.Printf("[upgrade/staticpods] Component %q upgraded successfully!\n", component)
} else {
fmt.Printf("[upgrade/staticpods] Not waiting for pod-hash change for component %q\n", component)
}

// Wait for the static pod component to come up and register itself as a mirror pod
if err := waiter.WaitForPodsWithLabel("component=" + component); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}

fmt.Printf("[upgrade/staticpods] Component %q upgraded successfully!\n", component)
return nil
}

// performEtcdStaticPodUpgrade performs upgrade of etcd, it returns bool which indicates fatal error or not and the actual error.
func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, recoverManifests map[string]string) (bool, error) {
func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, recoverManifests map[string]string, isTLSUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) {
// Add etcd static pod spec only if external etcd is not configured
if len(cfg.Etcd.Endpoints) != 0 {
if cfg.Etcd.External != nil {
return false, fmt.Errorf("external etcd detected, won't try to change any etcd state")
}

// Checking health state of etcd before proceeding with the upgrade
etcdCluster := util.LocalEtcdCluster{}
etcdStatus, err := etcdCluster.GetEtcdClusterStatus()
_, err := oldEtcdClient.GetClusterStatus()
if err != nil {
return true, fmt.Errorf("etcd cluster is not healthy: %v", err)
}

// Backing up etcd data store
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.DataDir
runningEtcdDir := cfg.Etcd.Local.DataDir
if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil {
return true, fmt.Errorf("failed to back up etcd data: %v", err)
}
@ -214,11 +273,15 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
// Need to check currently used version and version from constants, if differs then upgrade
desiredEtcdVersion, err := constants.EtcdSupportedVersion(cfg.KubernetesVersion)
if err != nil {
return true, fmt.Errorf("failed to parse the desired etcd version(%s): %v", desiredEtcdVersion.String(), err)
return true, fmt.Errorf("failed to retrieve an etcd version for the target kubernetes version: %v", err)
}
currentEtcdVersion, err := version.ParseSemantic(etcdStatus.Version)
currentEtcdVersionStr, err := oldEtcdClient.GetVersion()
if err != nil {
return true, fmt.Errorf("failed to parse the current etcd version(%s): %v", currentEtcdVersion.String(), err)
return true, fmt.Errorf("failed to retrieve the current etcd version: %v", err)
}
currentEtcdVersion, err := version.ParseSemantic(currentEtcdVersionStr)
if err != nil {
return true, fmt.Errorf("failed to parse the current etcd version(%s): %v", currentEtcdVersionStr, err)
}

// Comparing current etcd version with desired to catch the same version or downgrade condition and fail on them.
@ -230,7 +293,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
return false, nil
}

beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeName, constants.Etcd)
beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeRegistration.Name, constants.Etcd)
if err != nil {
return true, fmt.Errorf("failed to get etcd pod's hash: %v", err)
}
@ -241,52 +304,93 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
return true, fmt.Errorf("error creating local etcd static pod manifest file: %v", err)
}

// Waiter configurations for checking etcd status
noDelay := 0 * time.Second
podRestartDelay := noDelay
if isTLSUpgrade {
// If we are upgrading TLS we need to wait for old static pod to be removed.
// This is needed because we are not able to currently verify that the static pod
// has been updated through the apiserver across an etcd TLS upgrade.
// This value is arbitrary but seems to be long enough in manual testing.
podRestartDelay = 30 * time.Second
}
retries := 10
retryInterval := 15 * time.Second

// Perform the etcd upgrade using the function common to all control plane components
if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests); err != nil {
// Since etcd upgrade component failed, the old manifest has been restored
// now we need to check the health of etcd cluster if it came back up with old manifest
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests, isTLSUpgrade); err != nil {
fmt.Printf("[upgrade/etcd] Failed to upgrade etcd: %v\n", err)
// Since upgrade component failed, the old etcd manifest has either been restored or was never touched
// Now we need to check the health of etcd cluster if it is up with old manifest
fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available")
if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err)

// At this point we know that etcd cluster is dead and it is safe to copy backup datastore and to rollback old etcd manifest
if err := rollbackEtcdData(cfg, fmt.Errorf("etcd cluster is not healthy after upgrade: %v rolling back", err), pathMgr); err != nil {
fmt.Println("[upgrade/etcd] Rolling back etcd data")
if err := rollbackEtcdData(cfg, pathMgr); err != nil {
// Even copying back datastore failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
return true, fmt.Errorf("fatal error rolling back local etcd cluster datadir: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Old datastore has been copied, rolling back old manifests
if err := rollbackOldManifests(recoverManifests, err, pathMgr, true); err != nil {
// Rolling back to old manifests failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Since rollback of the old etcd manifest was successful, checking again the status of etcd cluster
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
fmt.Println("[upgrade/etcd] Etcd data rollback successful")

// Now that we've rolled back the data, let's check if the cluster comes up
fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available")
if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err)
// Nothing else left to try to recover etcd cluster
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
return true, fmt.Errorf("fatal error rolling back local etcd cluster manifest: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}

return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, rolled the state back to pre-upgrade state", err)
// We've recovered to the previous etcd from this case
}
fmt.Println("[upgrade/etcd] Etcd was rolled back and is now available")

// Since etcd cluster came back up with the old manifest
return true, fmt.Errorf("fatal error when trying to upgrade the etcd cluster: %v, rolled the state back to pre-upgrade state", err)
}

// Checking health state of etcd after the upgrade
if _, err = etcdCluster.GetEtcdClusterStatus(); err != nil {
// Despite the fact that upgradeComponent was successful, there is something wrong with etcd cluster
// First step is to restore back up of datastore
if err := rollbackEtcdData(cfg, fmt.Errorf("etcd cluster is not healthy after upgrade: %v rolling back", err), pathMgr); err != nil {
// Even copying back datastore failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Old datastore has been copied, rolling back old manifests
if err := rollbackOldManifests(recoverManifests, err, pathMgr, true); err != nil {
// Rolling back to old manifests failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
// Since rollback of the old etcd manifest was successful, checking again the status of etcd cluster
if _, err := etcdCluster.GetEtcdClusterStatus(); err != nil {
// Nothing else left to try to recover etcd cluster
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
// Initialize the new etcd client if it wasn't pre-initialized
if newEtcdClient == nil {
client, err := etcdutil.NewFromStaticPod(
[]string{"localhost:2379"},
constants.GetStaticPodDirectory(),
cfg.CertificatesDir,
)
if err != nil {
return true, fmt.Errorf("fatal error creating etcd client: %v", err)
}
newEtcdClient = client
}

// Checking health state of etcd after the upgrade
fmt.Println("[upgrade/etcd] Waiting for etcd to become available")
if _, err = newEtcdClient.WaitForClusterAvailable(podRestartDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck etcd: %v\n", err)
// Despite the fact that upgradeComponent was successful, there is something wrong with the etcd cluster
// First step is to restore back up of datastore
fmt.Println("[upgrade/etcd] Rolling back etcd data")
if err := rollbackEtcdData(cfg, pathMgr); err != nil {
// Even copying back datastore failed, no options for recovery left, bailing out
return true, fmt.Errorf("fatal error rolling back local etcd cluster datadir: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
}
fmt.Println("[upgrade/etcd] Etcd data rollback successful")

// Old datastore has been copied, rolling back old manifests
fmt.Println("[upgrade/etcd] Rolling back etcd manifest")
rollbackOldManifests(recoverManifests, err, pathMgr, true)
// rollbackOldManifests() always returns an error -- ignore it and continue

// Assuming rollback of the old etcd manifest was successful, check the status of etcd cluster again
fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available")
if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err)
// Nothing else left to try to recover etcd cluster
|
||||
return true, fmt.Errorf("fatal error rolling back local etcd cluster manifest: %v, the backup of etcd database is stored here:(%s)", err, backupEtcdDir)
|
||||
}
|
||||
fmt.Println("[upgrade/etcd] Etcd was rolled back and is now available")
|
||||
|
||||
// We've successfully rolled back etcd, and now return an error describing that the upgrade failed
|
||||
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, rolled the state back to pre-upgrade state", err)
|
||||
}
|
||||
|
||||
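The failure handling above hinges on a single polling primitive: wait an initial delay, then probe the cluster up to a fixed number of times, pausing between attempts (the noDelay/podRestartDelay, retries and retryInterval arguments to WaitForClusterAvailable). A minimal, self-contained sketch of that shape, assuming nothing beyond the standard library; waitForAvailable and the probe closure are illustrative names, not the kubeadm API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForAvailable sleeps for an initial delay, then runs the check up to
// retries times with a pause between attempts. This mirrors the shape of
// WaitForClusterAvailable(delay, retries, retryInterval) used above.
func waitForAvailable(check func() (bool, error), delay time.Duration, retries int, interval time.Duration) (bool, error) {
	time.Sleep(delay)
	for i := 0; i < retries; i++ {
		if ok, err := check(); err == nil && ok {
			return true, nil
		}
		time.Sleep(interval)
	}
	return false, errors.New("timed out waiting for the cluster to become available")
}

func main() {
	// Pretend the cluster only answers on the third probe.
	probes := 0
	ok, err := waitForAvailable(func() (bool, error) {
		probes++
		return probes >= 3, nil
	}, 0, 5, 10*time.Millisecond)
	fmt.Println(ok, err)
}
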
@ -294,13 +398,60 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
}

// StaticPodControlPlane upgrades a static pod-hosted control plane
func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, etcdUpgrade bool) error {
func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, etcdUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) error {
recoverManifests := map[string]string{}
var isTLSUpgrade bool
var isExternalEtcd bool

beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeRegistration.Name)
if err != nil {
return err
}

if oldEtcdClient == nil {
if cfg.Etcd.External != nil {
// External etcd
isExternalEtcd = true
client, err := etcdutil.New(
cfg.Etcd.External.Endpoints,
cfg.Etcd.External.CAFile,
cfg.Etcd.External.CertFile,
cfg.Etcd.External.KeyFile,
)
if err != nil {
return fmt.Errorf("failed to create etcd client for external etcd: %v", err)
}
oldEtcdClient = client
// Since etcd is managed externally, the new etcd client will be the same as the old client
if newEtcdClient == nil {
newEtcdClient = client
}
} else {
// etcd Static Pod
client, err := etcdutil.NewFromStaticPod(
[]string{"localhost:2379"},
constants.GetStaticPodDirectory(),
cfg.CertificatesDir,
)
if err != nil {
return fmt.Errorf("failed to create etcd client: %v", err)
}
oldEtcdClient = client
}
}

// etcd upgrade is done prior to other control plane components
if etcdUpgrade {
if !isExternalEtcd && etcdUpgrade {
previousEtcdHasTLS := oldEtcdClient.HasTLS()

// set the TLS upgrade flag for all components
isTLSUpgrade = !previousEtcdHasTLS
if isTLSUpgrade {
fmt.Printf("[upgrade/etcd] Upgrading to TLS for %s\n", constants.Etcd)
}

// Perform etcd upgrade using common to all control plane components function
fatal, err := performEtcdStaticPodUpgrade(waiter, pathMgr, cfg, recoverManifests)
fatal, err := performEtcdStaticPodUpgrade(waiter, pathMgr, cfg, recoverManifests, isTLSUpgrade, oldEtcdClient, newEtcdClient)
if err != nil {
if fatal {
return err
@ -309,11 +460,6 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
}
}

beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeName)
if err != nil {
return err
}

// Write the updated static Pod manifests into the temporary directory
fmt.Printf("[upgrade/staticpods] Writing new Static Pod manifests to %q\n", pathMgr.TempManifestDir())
err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg)
@ -322,7 +468,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
}

for _, component := range constants.MasterComponents {
if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests); err != nil {
if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests, isTLSUpgrade); err != nil {
return err
}
}
@ -330,14 +476,11 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
// Remove the temporary directories used on a best-effort (don't fail if the calls error out)
// The calls are set here by design; we should _not_ use "defer" above as that would remove the directories
// even in the "fail and rollback" case, where we want the directories preserved for the user.
os.RemoveAll(pathMgr.TempManifestDir())
os.RemoveAll(pathMgr.BackupManifestDir())
os.RemoveAll(pathMgr.BackupEtcdDir())

return nil
return pathMgr.CleanupDirs()
}

// rollbackOldManifests rolls back the backuped manifests if something went wrong
// rollbackOldManifests rolls back the backed-up manifests if something went wrong.
// It always returns an error to the caller.
func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr StaticPodPathManager, restoreEtcd bool) error {
errs := []error{origErr}
for component, backupPath := range oldManifests {
@ -358,17 +501,16 @@ func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr
return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs)
}

// rollbackEtcdData rolls back the content of etcd folder if something went wrong
func rollbackEtcdData(cfg *kubeadmapi.MasterConfiguration, origErr error, pathMgr StaticPodPathManager) error {
errs := []error{origErr}
// rollbackEtcdData rolls back the content of etcd folder if something went wrong.
// When the folder contents are successfully rolled back, nil is returned, otherwise an error is returned.
func rollbackEtcdData(cfg *kubeadmapi.MasterConfiguration, pathMgr StaticPodPathManager) error {
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.DataDir
err := util.CopyDir(backupEtcdDir, runningEtcdDir)
runningEtcdDir := cfg.Etcd.Local.DataDir

if err != nil {
errs = append(errs, err)
if err := util.CopyDir(backupEtcdDir, runningEtcdDir); err != nil {
// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't recover etcd database with error: %v, the location of etcd backup: %s ", err, backupEtcdDir)
}

// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't recover etcd database with error: %v, the location of etcd backup: %s ", errs, backupEtcdDir)
return nil
}

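rollbackEtcdData above reduces to one operation: copy the backup directory over the running data directory and surface the backup location if that fails. A rough, self-contained sketch of that copy-back step; copyDir is a simplified stand-in for kubeadm's util.CopyDir, and the directory names are invented for the example:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// copyDir recursively copies src into dst; a simplified stand-in for util.CopyDir.
func copyDir(src, dst string) error {
	return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, path)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode())
		}
		in, err := os.Open(path)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, in)
		return err
	})
}

// rollbackData mirrors rollbackEtcdData above: copy the backup over the
// running data directory and report the backup location on failure.
func rollbackData(backupDir, runningDir string) error {
	if err := copyDir(backupDir, runningDir); err != nil {
		return fmt.Errorf("couldn't recover etcd database with error: %v, the location of etcd backup: %s", err, backupDir)
	}
	return nil
}

func main() {
	src, _ := ioutil.TempDir("", "backup")
	dst, _ := ioutil.TempDir("", "data")
	defer os.RemoveAll(src)
	defer os.RemoveAll(dst)
	_ = ioutil.WriteFile(filepath.Join(src, "member"), []byte("snapshot"), 0600)
	fmt.Println(rollbackData(src, dst)) // <nil> on success
}
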
298
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/staticpods_test.go
generated
vendored
@ -21,19 +21,23 @@ import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"

"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/transport"
"k8s.io/apimachinery/pkg/runtime"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
)

const (
@ -42,27 +46,19 @@ const (
waitForPodsWithLabel = "wait-for-pods-with-label"

testConfiguration = `
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
advertiseAddress: 1.2.3.4
bindPort: 6443
apiServerCertSANs: null
apiServerExtraArgs: null
authorizationModes:
- Node
- RBAC
certificatesDir: %s
cloudProvider: ""
controllerManagerExtraArgs: null
etcd:
caFile: ""
certFile: ""
dataDir: /var/lib/etcd
endpoints: null
extraArgs: null
image: ""
keyFile: ""
serverCertSANs: null
peerCertSANs: null
local:
dataDir: %s
image: ""
featureFlags: null
imageRepository: k8s.gcr.io
kubernetesVersion: %s
@ -70,7 +66,9 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
nodeName: thegopher
nodeRegistration:
name: foo
criSocket: ""
schedulerExtraArgs: null
token: ce3aa5.5ec8455bb76b379f
tokenTTL: 24h
@ -117,8 +115,8 @@ func (w *fakeWaiter) WaitForStaticPodSingleHash(_ string, _ string) (string, err
return "", w.errsToReturn[waitForHashes]
}

// WaitForStaticPodControlPlaneHashChange returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodControlPlaneHashChange(_, _, _ string) error {
// WaitForStaticPodHashChange returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodHashChange(_, _, _ string) error {
return w.errsToReturn[waitForHashChange]
}

@ -128,6 +126,7 @@ func (w *fakeWaiter) WaitForHealthyKubelet(_ time.Duration, _ string) error {
}

type fakeStaticPodPathManager struct {
kubernetesDir string
realManifestDir string
tempManifestDir string
backupManifestDir string
@ -136,29 +135,36 @@ type fakeStaticPodPathManager struct {
}

func NewFakeStaticPodPathManager(moveFileFunc func(string, string) error) (StaticPodPathManager, error) {
realManifestsDir, err := ioutil.TempDir("", "kubeadm-upgraded-manifests")
kubernetesDir, err := ioutil.TempDir("", "kubeadm-pathmanager-")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
}

upgradedManifestsDir, err := ioutil.TempDir("", "kubeadm-upgraded-manifests")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
realManifestDir := filepath.Join(kubernetesDir, constants.ManifestsSubDirName)
if err := os.Mkdir(realManifestDir, 0700); err != nil {
return nil, fmt.Errorf("couldn't create a realManifestDir for the upgrade: %v", err)
}

backupManifestsDir, err := ioutil.TempDir("", "kubeadm-backup-manifests")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
upgradedManifestDir := filepath.Join(kubernetesDir, "upgraded-manifests")
if err := os.Mkdir(upgradedManifestDir, 0700); err != nil {
return nil, fmt.Errorf("couldn't create a upgradedManifestDir for the upgrade: %v", err)
}
backupEtcdDir, err := ioutil.TempDir("", "kubeadm-backup-etcd")
if err != nil {

backupManifestDir := filepath.Join(kubernetesDir, "backup-manifests")
if err := os.Mkdir(backupManifestDir, 0700); err != nil {
return nil, fmt.Errorf("couldn't create a backupManifestDir for the upgrade: %v", err)
}

backupEtcdDir := filepath.Join(kubernetesDir, "kubeadm-backup-etcd")
if err := os.Mkdir(backupEtcdDir, 0700); err != nil {
return nil, err
}

return &fakeStaticPodPathManager{
realManifestDir: realManifestsDir,
tempManifestDir: upgradedManifestsDir,
backupManifestDir: backupManifestsDir,
kubernetesDir: kubernetesDir,
realManifestDir: realManifestDir,
tempManifestDir: upgradedManifestDir,
backupManifestDir: backupManifestDir,
backupEtcdDir: backupEtcdDir,
MoveFileFunc: moveFileFunc,
}, nil
@ -168,6 +174,10 @@ func (spm *fakeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
return spm.MoveFileFunc(oldPath, newPath)
}

func (spm *fakeStaticPodPathManager) KubernetesDir() string {
return spm.kubernetesDir
}

func (spm *fakeStaticPodPathManager) RealManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.realManifestDir)
}
@ -193,14 +203,95 @@ func (spm *fakeStaticPodPathManager) BackupEtcdDir() string {
return spm.backupEtcdDir
}

func (spm *fakeStaticPodPathManager) CleanupDirs() error {
if err := os.RemoveAll(spm.TempManifestDir()); err != nil {
return err
}
if err := os.RemoveAll(spm.BackupManifestDir()); err != nil {
return err
}
return os.RemoveAll(spm.BackupEtcdDir())
}

type fakeTLSEtcdClient struct{ TLS bool }

func (c fakeTLSEtcdClient) HasTLS() bool {
return c.TLS
}

func (c fakeTLSEtcdClient) ClusterAvailable() (bool, error) { return true, nil }

func (c fakeTLSEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) {
return true, nil
}

func (c fakeTLSEtcdClient) GetClusterStatus() (map[string]*clientv3.StatusResponse, error) {
return map[string]*clientv3.StatusResponse{
"foo": {
Version: "3.1.12",
}}, nil
}

func (c fakeTLSEtcdClient) GetClusterVersions() (map[string]string, error) {
return map[string]string{
"foo": "3.1.12",
}, nil
}

func (c fakeTLSEtcdClient) GetVersion() (string, error) {
return "3.1.12", nil
}

type fakePodManifestEtcdClient struct{ ManifestDir, CertificatesDir string }

func (c fakePodManifestEtcdClient) HasTLS() bool {
hasTLS, _ := etcdutil.PodManifestsHaveTLS(c.ManifestDir)
return hasTLS
}

func (c fakePodManifestEtcdClient) ClusterAvailable() (bool, error) { return true, nil }

func (c fakePodManifestEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) {
return true, nil
}

func (c fakePodManifestEtcdClient) GetClusterStatus() (map[string]*clientv3.StatusResponse, error) {
// Make sure the certificates generated from the upgrade are readable from disk
tlsInfo := transport.TLSInfo{
CertFile: filepath.Join(c.CertificatesDir, constants.EtcdCACertName),
KeyFile: filepath.Join(c.CertificatesDir, constants.EtcdHealthcheckClientCertName),
TrustedCAFile: filepath.Join(c.CertificatesDir, constants.EtcdHealthcheckClientKeyName),
}
_, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}

return map[string]*clientv3.StatusResponse{
"foo": {Version: "3.1.12"},
}, nil
}

func (c fakePodManifestEtcdClient) GetClusterVersions() (map[string]string, error) {
return map[string]string{
"foo": "3.1.12",
}, nil
}

func (c fakePodManifestEtcdClient) GetVersion() (string, error) {
return "3.1.12", nil
}

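Both fakes above satisfy the same client interface the upgrade code consumes (etcdutil.ClusterInterrogator), which is what lets the test inject canned TLS and version answers. A minimal sketch of that test-double pattern, with a trimmed-down illustrative interface rather than the real one:

package main

import "fmt"

// Interrogator is a trimmed-down stand-in for the interface the
// upgrade code depends on.
type Interrogator interface {
	HasTLS() bool
	GetVersion() (string, error)
}

// fakeClient returns fixed answers, like fakeTLSEtcdClient above.
type fakeClient struct {
	tls     bool
	version string
}

func (f fakeClient) HasTLS() bool                { return f.tls }
func (f fakeClient) GetVersion() (string, error) { return f.version, nil }

// needsTLSUpgrade is example production logic written against the interface,
// so either a real client or a fake can be passed in.
func needsTLSUpgrade(c Interrogator) bool { return !c.HasTLS() }

func main() {
	fmt.Println(needsTLSUpgrade(fakeClient{tls: false, version: "3.1.12"})) // true
}
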
func TestStaticPodControlPlane(t *testing.T) {
tests := []struct {
description string
waitErrsToReturn map[string]error
moveFileFunc func(string, string) error
expectedErr bool
manifestShouldChange bool
}{
{ // error-free case should succeed
{
description: "error-free case should succeed",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
@ -212,7 +303,8 @@ func TestStaticPodControlPlane(t *testing.T) {
expectedErr: false,
manifestShouldChange: true,
},
{ // any wait error should result in a rollback and an abort
{
description: "any wait error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: fmt.Errorf("boo! failed"),
waitForHashChange: nil,
@ -224,7 +316,8 @@ func TestStaticPodControlPlane(t *testing.T) {
expectedErr: true,
manifestShouldChange: false,
},
{ // any wait error should result in a rollback and an abort
{
description: "any wait error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: fmt.Errorf("boo! failed"),
@ -236,7 +329,8 @@ func TestStaticPodControlPlane(t *testing.T) {
expectedErr: true,
manifestShouldChange: false,
},
{ // any wait error should result in a rollback and an abort
{
description: "any wait error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
@ -248,7 +342,8 @@ func TestStaticPodControlPlane(t *testing.T) {
expectedErr: true,
manifestShouldChange: false,
},
{ // any path-moving error should result in a rollback and an abort
{
description: "any path-moving error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
@ -264,7 +359,8 @@ func TestStaticPodControlPlane(t *testing.T) {
expectedErr: true,
manifestShouldChange: false,
},
{ // any path-moving error should result in a rollback and an abort
{
description: "any path-moving error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
@ -280,7 +376,8 @@ func TestStaticPodControlPlane(t *testing.T) {
expectedErr: true,
manifestShouldChange: false,
},
{ // any path-moving error should result in a rollback and an abort; even though this is the last component (kube-apiserver and kube-controller-manager healthy)
{
description: "any path-moving error should result in a rollback and an abort; even though this is the last component (kube-apiserver and kube-controller-manager healthy)",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
@ -304,28 +401,34 @@ func TestStaticPodControlPlane(t *testing.T) {
if err != nil {
t.Fatalf("couldn't run NewFakeStaticPodPathManager: %v", err)
}
defer os.RemoveAll(pathMgr.RealManifestDir())
defer os.RemoveAll(pathMgr.TempManifestDir())
defer os.RemoveAll(pathMgr.BackupManifestDir())
defer os.RemoveAll(pathMgr.(*fakeStaticPodPathManager).KubernetesDir())
constants.KubernetesDir = pathMgr.(*fakeStaticPodPathManager).KubernetesDir()

tempCertsDir, err := ioutil.TempDir("", "kubeadm-certs")
if err != nil {
t.Fatalf("couldn't create temporary certificates directory: %v", err)
}
defer os.RemoveAll(tempCertsDir)
tmpEtcdDataDir, err := ioutil.TempDir("", "kubeadm-etcd-data")
if err != nil {
t.Fatalf("couldn't create temporary etcd data directory: %v", err)
}
defer os.RemoveAll(tmpEtcdDataDir)

oldcfg, err := getConfig("v1.7.0", tempCertsDir)
oldcfg, err := getConfig("v1.9.0", tempCertsDir, tmpEtcdDataDir)
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}

// Initialize PKI minus any etcd certificates to simulate etcd PKI upgrade
certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
certsphase.CreateCACertAndKeyfiles,
certsphase.CreateCACertAndKeyFiles,
certsphase.CreateAPIServerCertAndKeyFiles,
certsphase.CreateAPIServerKubeletClientCertAndKeyFiles,
// certsphase.CreateEtcdCACertAndKeyFiles,
// certsphase.CreateEtcdServerCertAndKeyFiles,
// certsphase.CreateEtcdPeerCertAndKeyFiles,
// certsphase.CreateEtcdHealthcheckClientCertAndKeyFiles,
// certsphase.CreateAPIServerEtcdClientCertAndKeyFiles,
certsphase.CreateServiceAccountKeyAndPublicKeyFiles,
certsphase.CreateFrontProxyCACertAndKeyFiles,
@ -354,15 +457,28 @@ func TestStaticPodControlPlane(t *testing.T) {
t.Fatalf("couldn't read temp file: %v", err)
}

newcfg, err := getConfig("v1.8.0", tempCertsDir)
newcfg, err := getConfig("v1.10.0", tempCertsDir, tmpEtcdDataDir)
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}

actualErr := StaticPodControlPlane(waiter, pathMgr, newcfg, false)
actualErr := StaticPodControlPlane(
waiter,
pathMgr,
newcfg,
true,
fakeTLSEtcdClient{
TLS: false,
},
fakePodManifestEtcdClient{
ManifestDir: pathMgr.RealManifestDir(),
CertificatesDir: newcfg.CertificatesDir,
},
)
if (actualErr != nil) != rt.expectedErr {
t.Errorf(
"failed UpgradeStaticPodControlPlane\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
"failed UpgradeStaticPodControlPlane\n%s\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
rt.description,
rt.expectedErr,
(actualErr != nil),
actualErr,
@ -376,12 +492,13 @@ func TestStaticPodControlPlane(t *testing.T) {

if (oldHash != newHash) != rt.manifestShouldChange {
t.Errorf(
"failed StaticPodControlPlane\n\texpected manifest change: %t\n\tgot: %t",
"failed StaticPodControlPlane\n%s\n\texpected manifest change: %t\n\tgot: %t",
rt.description,
rt.manifestShouldChange,
(oldHash != newHash),
)
}

return
}
}

@ -396,12 +513,91 @@ func getAPIServerHash(dir string) (string, error) {
return fmt.Sprintf("%x", sha256.Sum256(fileBytes)), nil
}

func getConfig(version string, certsDir string) (*kubeadmapi.MasterConfiguration, error) {
externalcfg := &kubeadmapiext.MasterConfiguration{}
// TODO: Make this test function use the rest of the "official" API machinery helper funcs we have inside of kubeadm
func getConfig(version, certsDir, etcdDataDir string) (*kubeadmapi.MasterConfiguration, error) {
externalcfg := &kubeadmapiv1alpha2.MasterConfiguration{}
internalcfg := &kubeadmapi.MasterConfiguration{}
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(fmt.Sprintf(testConfiguration, certsDir, version)), externalcfg); err != nil {
if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(fmt.Sprintf(testConfiguration, certsDir, etcdDataDir, version)), externalcfg); err != nil {
return nil, fmt.Errorf("unable to decode config: %v", err)
}
legacyscheme.Scheme.Convert(externalcfg, internalcfg, nil)
kubeadmscheme.Scheme.Convert(externalcfg, internalcfg, nil)
return internalcfg, nil
}

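getConfig above follows the usual two-step shape: decode the versioned (external) configuration from bytes, then convert it to the internal type the rest of the code works with. A self-contained sketch of that external-to-internal step, using plain JSON in place of the kubeadm scheme machinery; the types and fields are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// externalConfig models a versioned, serializable API type.
type externalConfig struct {
	KubernetesVersion string `json:"kubernetesVersion"`
	CertificatesDir   string `json:"certificatesDir"`
}

// internalConfig models the richer in-memory type the code works with.
type internalConfig struct {
	KubernetesVersion string
	CertificatesDir   string
}

// convert mimics scheme.Convert: external (wire) -> internal (in-memory).
func convert(ext *externalConfig, in *internalConfig) {
	in.KubernetesVersion = ext.KubernetesVersion
	in.CertificatesDir = ext.CertificatesDir
}

func main() {
	data := []byte(`{"kubernetesVersion":"v1.10.0","certificatesDir":"/etc/kubernetes/pki"}`)
	ext := &externalConfig{}
	if err := json.Unmarshal(data, ext); err != nil {
		fmt.Println("unable to decode config:", err)
		return
	}
	in := &internalConfig{}
	convert(ext, in)
	fmt.Printf("%+v\n", *in)
}
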
func getTempDir(t *testing.T, name string) (string, func()) {
dir, err := ioutil.TempDir(os.TempDir(), name)
if err != nil {
t.Fatalf("couldn't make temporary directory: %v", err)
}

return dir, func() {
os.RemoveAll(dir)
}
}

func TestCleanupDirs(t *testing.T) {
tests := []struct {
name string
keepManifest, keepEtcd bool
}{
{
name: "save manifest backup",
keepManifest: true,
},
{
name: "save both etcd and manifest",
keepManifest: true,
keepEtcd: true,
},
{
name: "save nothing",
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
realManifestDir, cleanup := getTempDir(t, "realManifestDir")
defer cleanup()

tempManifestDir, cleanup := getTempDir(t, "tempManifestDir")
defer cleanup()

backupManifestDir, cleanup := getTempDir(t, "backupManifestDir")
defer cleanup()

backupEtcdDir, cleanup := getTempDir(t, "backupEtcdDir")
defer cleanup()

mgr := NewKubeStaticPodPathManager(realManifestDir, tempManifestDir, backupManifestDir, backupEtcdDir, test.keepManifest, test.keepEtcd)
err := mgr.CleanupDirs()
if err != nil {
t.Errorf("unexpected error cleaning up: %v", err)
}

if _, err := os.Stat(tempManifestDir); !os.IsNotExist(err) {
t.Errorf("%q should not have existed", tempManifestDir)
}
_, err = os.Stat(backupManifestDir)
if test.keepManifest {
if err != nil {
t.Errorf("unexpected error getting backup manifest dir")
}
} else {
if !os.IsNotExist(err) {
t.Error("expected backup manifest to not exist")
}
}

_, err = os.Stat(backupEtcdDir)
if test.keepEtcd {
if err != nil {
t.Errorf("unexpected error getting backup etcd dir")
}
} else {
if !os.IsNotExist(err) {
t.Error("expected backup etcd dir to not exist")
}
}
})
}
}

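TestCleanupDirs above asserts that the temporary manifest directory is always removed, while each backup directory survives when its keep flag is set. A small sketch of that keep-flag cleanup idea; cleanupDirs is an illustrative name, not the kubeadm implementation:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// cleanupDirs removes the temporary manifest dir unconditionally, but keeps
// the backup manifest/etcd dirs when the corresponding flag is set, which is
// the behavior TestCleanupDirs checks for.
func cleanupDirs(tempDir, backupManifestDir, backupEtcdDir string, keepManifest, keepEtcd bool) error {
	if err := os.RemoveAll(tempDir); err != nil {
		return err
	}
	if !keepManifest {
		if err := os.RemoveAll(backupManifestDir); err != nil {
			return err
		}
	}
	if !keepEtcd {
		return os.RemoveAll(backupEtcdDir)
	}
	return nil
}

func main() {
	tmp, _ := ioutil.TempDir("", "temp")
	bm, _ := ioutil.TempDir("", "backup-manifests")
	be, _ := ioutil.TempDir("", "backup-etcd")
	defer os.RemoveAll(tmp)
	defer os.RemoveAll(bm)
	defer os.RemoveAll(be)

	fmt.Println(cleanupDirs(tmp, bm, be, true, false))
	_, err := os.Stat(bm)
	fmt.Println("backup manifests kept:", err == nil) // true
}
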
27
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade/versiongetter.go
generated
vendored
@ -122,3 +122,30 @@ func computeKubeletVersions(nodes []v1.Node) map[string]uint16 {
}
return kubeletVersions
}

// OfflineVersionGetter will use the version provided or delegate to the wrapped VersionGetter
type OfflineVersionGetter struct {
VersionGetter
version string
}

// NewOfflineVersionGetter wraps a VersionGetter and skips online communication if default information is supplied.
// Version can be "" and the behavior will be identical to the versionGetter passed in.
func NewOfflineVersionGetter(versionGetter VersionGetter, version string) VersionGetter {
return &OfflineVersionGetter{
VersionGetter: versionGetter,
version: version,
}
}

// VersionFromCILabel will return the version that was passed into the struct
func (o *OfflineVersionGetter) VersionFromCILabel(ciVersionLabel, description string) (string, *versionutil.Version, error) {
if o.version == "" {
return o.VersionGetter.VersionFromCILabel(ciVersionLabel, description)
}
ver, err := versionutil.ParseSemantic(o.version)
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse version %s: %v", description, err)
}
return o.version, ver, nil
}

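OfflineVersionGetter is a decorator: it embeds the VersionGetter interface, answers locally when a version was pinned up front, and otherwise falls through to the wrapped getter. A generic sketch of that embed-and-override pattern; the interface and types here are invented for illustration:

package main

import "fmt"

type VersionSource interface {
	Version() (string, error)
}

type remoteSource struct{}

func (remoteSource) Version() (string, error) { return "v1.10.0 (from network)", nil }

// offlineSource decorates another VersionSource: when a version was supplied
// up front it answers locally, otherwise it delegates to the embedded source.
type offlineSource struct {
	VersionSource        // embedded: inherits every method not overridden
	version       string // optional pinned version
}

func (o offlineSource) Version() (string, error) {
	if o.version == "" {
		return o.VersionSource.Version() // fall through to the wrapped getter
	}
	return o.version, nil
}

func main() {
	online := offlineSource{VersionSource: remoteSource{}}
	pinned := offlineSource{VersionSource: remoteSource{}, version: "v1.9.3"}
	fmt.Println(online.Version()) // v1.10.0 (from network)
	fmt.Println(pinned.Version()) // v1.9.3
}
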
10
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig/BUILD
generated
vendored
@ -12,11 +12,11 @@ go_library(
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
@ -42,9 +42,9 @@ go_test(
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",

20
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig/uploadconfig.go
generated
vendored
@ -19,31 +19,33 @@ package uploadconfig
import (
"fmt"

"github.com/ghodss/yaml"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)

// UploadConfiguration saves the MasterConfiguration used for later reference (when upgrading for instance)
func UploadConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {

fmt.Printf("[uploadconfig] Storing the configuration used in ConfigMap %q in the %q Namespace\n", kubeadmconstants.MasterConfigurationConfigMap, metav1.NamespaceSystem)
fmt.Printf("[uploadconfig] storing the configuration used in ConfigMap %q in the %q Namespace\n", kubeadmconstants.MasterConfigurationConfigMap, metav1.NamespaceSystem)

// Convert cfg to the external version as that's the only version of the API that can be deserialized later
externalcfg := &kubeadmapiext.MasterConfiguration{}
legacyscheme.Scheme.Convert(cfg, externalcfg, nil)
externalcfg := &kubeadmapiv1alpha2.MasterConfiguration{}
kubeadmscheme.Scheme.Convert(cfg, externalcfg, nil)

// Removes sensitive info from the data that will be stored in the config map
externalcfg.Token = ""
externalcfg.BootstrapTokens = nil
// Clear the NodeRegistration object.
externalcfg.NodeRegistration = kubeadmapiv1alpha2.NodeRegistrationOptions{}

cfgYaml, err := yaml.Marshal(*externalcfg)
cfgYaml, err := util.MarshalToYamlForCodecs(externalcfg, kubeadmapiv1alpha2.SchemeGroupVersion, scheme.Codecs)
if err != nil {
return err
}

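UploadConfiguration above scrubs bootstrap-token material and node-specific registration details before the config is persisted cluster-wide. A minimal sketch of that scrub-then-serialize step; the types and field names are invented, and plain JSON stands in for the kubeadm codec machinery:

package main

import (
	"encoding/json"
	"fmt"
)

type nodeRegistration struct {
	Name      string `json:"name,omitempty"`
	CRISocket string `json:"criSocket,omitempty"`
}

type masterConfig struct {
	KubernetesVersion string           `json:"kubernetesVersion"`
	BootstrapToken    string           `json:"bootstrapToken,omitempty"`
	NodeRegistration  nodeRegistration `json:"nodeRegistration"`
}

// scrub removes sensitive and node-specific info before the config is
// stored cluster-wide, mirroring what UploadConfiguration does above.
func scrub(cfg masterConfig) masterConfig {
	cfg.BootstrapToken = ""                   // never upload token material
	cfg.NodeRegistration = nodeRegistration{} // node-local details stay off the shared config
	return cfg
}

func main() {
	cfg := masterConfig{
		KubernetesVersion: "v1.10.3",
		BootstrapToken:    "abcdef.abcdef0123456789",
		NodeRegistration:  nodeRegistration{Name: "node-foo", CRISocket: "/var/run/custom-cri.sock"},
	}
	out, _ := json.Marshal(scrub(cfg))
	fmt.Println(string(out)) // token and node registration are gone
}
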
45
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go
generated
vendored
@ -25,9 +25,9 @@ import (
clientsetfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)

func TestUploadConfiguration(t *testing.T) {
@ -63,8 +63,19 @@ func TestUploadConfiguration(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
KubernetesVersion: "1.7.3",
Token: "1234567",
KubernetesVersion: "v1.10.3",
BootstrapTokens: []kubeadmapi.BootstrapToken{
{
Token: &kubeadmapi.BootstrapTokenString{
ID: "abcdef",
Secret: "abcdef0123456789",
},
},
},
NodeRegistration: kubeadmapi.NodeRegistrationOptions{
Name: "node-foo",
CRISocket: "/var/run/custom-cri.sock",
},
}
client := clientsetfake.NewSimpleClientset()
if tt.errOnCreate != nil {
@ -96,22 +107,36 @@ func TestUploadConfiguration(t *testing.T) {
t.Errorf("Fail to find ConfigMap key")
}

decodedExtCfg := &kubeadmapiext.MasterConfiguration{}
decodedExtCfg := &kubeadmapiv1alpha2.MasterConfiguration{}
decodedCfg := &kubeadmapi.MasterConfiguration{}

if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(configData), decodedExtCfg); err != nil {
if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(configData), decodedExtCfg); err != nil {
t.Errorf("unable to decode config from bytes: %v", err)
}
// Default and convert to the internal version
legacyscheme.Scheme.Default(decodedExtCfg)
legacyscheme.Scheme.Convert(decodedExtCfg, decodedCfg, nil)
kubeadmscheme.Scheme.Default(decodedExtCfg)
kubeadmscheme.Scheme.Convert(decodedExtCfg, decodedCfg, nil)

if decodedCfg.KubernetesVersion != cfg.KubernetesVersion {
t.Errorf("Decoded value doesn't match, decoded = %#v, expected = %#v", decodedCfg.KubernetesVersion, cfg.KubernetesVersion)
}

if decodedCfg.Token != "" {
t.Errorf("Decoded value contains token (sensitive info), decoded = %#v, expected = empty", decodedCfg.Token)
// If the decoded cfg has a BootstrapTokens array, verify the sensitive information we had isn't still there.
if len(decodedCfg.BootstrapTokens) > 0 && decodedCfg.BootstrapTokens[0].Token != nil && decodedCfg.BootstrapTokens[0].Token.String() == cfg.BootstrapTokens[0].Token.String() {
t.Errorf("Decoded value contains .BootstrapTokens (sensitive info), decoded = %#v, expected = empty", decodedCfg.BootstrapTokens)
}

// Make sure no information from NodeRegistrationOptions was uploaded.
if decodedCfg.NodeRegistration.Name == cfg.NodeRegistration.Name || decodedCfg.NodeRegistration.CRISocket != kubeadmapiv1alpha2.DefaultCRISocket {
t.Errorf("Decoded value contains .NodeRegistration (node-specific info shouldn't be uploaded), decoded = %#v, expected = empty", decodedCfg.NodeRegistration)
}

if decodedExtCfg.Kind != "MasterConfiguration" {
t.Errorf("Expected kind MasterConfiguration, got %v", decodedExtCfg.Kind)
}

if decodedExtCfg.APIVersion != "kubeadm.k8s.io/v1alpha2" {
t.Errorf("Expected apiVersion kubeadm.k8s.io/v1alpha2, got %v", decodedExtCfg.APIVersion)
}
}
})