vendor cleanup: remove unused, non-Go, and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions


@@ -1,198 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"client_ca_hook.go",
"client_util.go",
"controller.go",
"doc.go",
"import_known_versions.go",
"master.go",
"services.go",
],
importpath = "k8s.io/kubernetes/pkg/master",
deps = [
"//pkg/apis/admission/install:go_default_library",
"//pkg/apis/admissionregistration/install:go_default_library",
"//pkg/apis/apps/install:go_default_library",
"//pkg/apis/authentication/install:go_default_library",
"//pkg/apis/authorization/install:go_default_library",
"//pkg/apis/autoscaling/install:go_default_library",
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/certificates/install:go_default_library",
"//pkg/apis/componentconfig/install:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/events/install:go_default_library",
"//pkg/apis/extensions/install:go_default_library",
"//pkg/apis/imagepolicy/install:go_default_library",
"//pkg/apis/networking/install:go_default_library",
"//pkg/apis/policy/install:go_default_library",
"//pkg/apis/rbac/install:go_default_library",
"//pkg/apis/scheduling/install:go_default_library",
"//pkg/apis/settings/install:go_default_library",
"//pkg/apis/storage/install:go_default_library",
"//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
"//pkg/kubeapiserver/options:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/master/tunneler:go_default_library",
"//pkg/registry/admissionregistration/rest:go_default_library",
"//pkg/registry/apps/rest:go_default_library",
"//pkg/registry/authentication/rest:go_default_library",
"//pkg/registry/authorization/rest:go_default_library",
"//pkg/registry/autoscaling/rest:go_default_library",
"//pkg/registry/batch/rest:go_default_library",
"//pkg/registry/certificates/rest:go_default_library",
"//pkg/registry/core/endpoint:go_default_library",
"//pkg/registry/core/endpoint/storage:go_default_library",
"//pkg/registry/core/rangeallocation:go_default_library",
"//pkg/registry/core/rest:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//pkg/registry/core/service/ipallocator/controller:go_default_library",
"//pkg/registry/core/service/portallocator/controller:go_default_library",
"//pkg/registry/events/rest:go_default_library",
"//pkg/registry/extensions/rest:go_default_library",
"//pkg/registry/networking/rest:go_default_library",
"//pkg/registry/policy/rest:go_default_library",
"//pkg/registry/rbac/rest:go_default_library",
"//pkg/registry/scheduling/rest:go_default_library",
"//pkg/registry/settings/rest:go_default_library",
"//pkg/registry/storage/rest:go_default_library",
"//pkg/routes:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util/async:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/events/v1beta1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "medium",
timeout = "long",
srcs = [
"client_ca_hook_test.go",
"controller_test.go",
"import_known_versions_test.go",
"master_openapi_test.go",
"master_test.go",
],
embed = [":go_default_library"],
race = "off",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/autoscaling:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/certificates:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/generated/openapi:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/registry/certificates/rest:go_default_library",
"//pkg/registry/core/rest:go_default_library",
"//pkg/registry/registrytest:go_default_library",
"//pkg/version:go_default_library",
"//vendor/github.com/go-openapi/loads:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",
"//vendor/github.com/go-openapi/strfmt:go_default_library",
"//vendor/github.com/go-openapi/validate:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/master/controller/crdregistration:all-srcs",
"//pkg/master/ports:all-srcs",
"//pkg/master/reconcilers:all-srcs",
"//pkg/master/tunneler:all-srcs",
],
tags = ["automanaged"],
)


@@ -1,41 +0,0 @@
approvers:
- deads2k
- derekwaynecarr
- lavalamp
- mikedanese
- nikhiljindal
- sttts
- wojtek-t
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- yujuhong
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- gmarek
- erictune
- davidopp
- pmorie
- sttts
- dchen1107
- saad-ali
- luxas
- janetkuo
- justinsb
- roberthbailey
- ncdc
- tallclair
- mwielgus
- timothysc
- soltysh
- piosz
- madhusudancs
- hongchaodeng
- jszczepkowski
- enj


@@ -1,143 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"encoding/json"
"fmt"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
api "k8s.io/kubernetes/pkg/apis/core"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
)
type ClientCARegistrationHook struct {
ClientCA []byte
RequestHeaderUsernameHeaders []string
RequestHeaderGroupHeaders []string
RequestHeaderExtraHeaderPrefixes []string
RequestHeaderCA []byte
RequestHeaderAllowedNames []string
}
func (h ClientCARegistrationHook) PostStartHook(hookContext genericapiserver.PostStartHookContext) error {
// no work to do
if len(h.ClientCA) == 0 && len(h.RequestHeaderCA) == 0 {
return nil
}
// initializing CAs is important so that aggregated API servers can come up with "normal" config.
// We've seen lagging etcd before, so we want to retry this a few times before we decide to crashloop
// the API server on it.
err := wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {
// retry building the config, since the server can sometimes be in an in-between state that caused
// some kind of auto-detection failure, as seen in other post-start hooks.
// TODO see if this is still true and fix the RBAC one too if it isn't.
client, err := coreclient.NewForConfig(hookContext.LoopbackClientConfig)
if err != nil {
utilruntime.HandleError(err)
return false, nil
}
return h.tryToWriteClientCAs(client)
})
// if we're never able to make it through initialization, kill the API server
if err != nil {
return fmt.Errorf("unable to initialize client CA configmap: %v", err)
}
return nil
}
// tryToWriteClientCAs is here for unit testing with a fake client. This is a wait.ConditionFunc, so the bool
// indicates whether the condition was met: true when it's finished, false when it should retry.
func (h ClientCARegistrationHook) tryToWriteClientCAs(client coreclient.CoreInterface) (bool, error) {
if err := createNamespaceIfNeeded(client, metav1.NamespaceSystem); err != nil {
utilruntime.HandleError(err)
return false, nil
}
data := map[string]string{}
if len(h.ClientCA) > 0 {
data["client-ca-file"] = string(h.ClientCA)
}
if len(h.RequestHeaderCA) > 0 {
var err error
// encoding errors aren't going to get better, so just fail on them.
data["requestheader-username-headers"], err = jsonSerializeStringSlice(h.RequestHeaderUsernameHeaders)
if err != nil {
return false, err
}
data["requestheader-group-headers"], err = jsonSerializeStringSlice(h.RequestHeaderGroupHeaders)
if err != nil {
return false, err
}
data["requestheader-extra-headers-prefix"], err = jsonSerializeStringSlice(h.RequestHeaderExtraHeaderPrefixes)
if err != nil {
return false, err
}
data["requestheader-client-ca-file"] = string(h.RequestHeaderCA)
data["requestheader-allowed-names"], err = jsonSerializeStringSlice(h.RequestHeaderAllowedNames)
if err != nil {
return false, err
}
}
// writes that fail may succeed next time if we retry, so queue for retry
if err := writeConfigMap(client, "extension-apiserver-authentication", data); err != nil {
utilruntime.HandleError(err)
return false, nil
}
return true, nil
}
func jsonSerializeStringSlice(in []string) (string, error) {
out, err := json.Marshal(in)
if err != nil {
return "", err
}
return string(out), err
}
func writeConfigMap(client coreclient.ConfigMapsGetter, name string, data map[string]string) error {
existing, err := client.ConfigMaps(metav1.NamespaceSystem).Get(name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err := client.ConfigMaps(metav1.NamespaceSystem).Create(&api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: name},
Data: data,
})
return err
}
if err != nil {
return err
}
existing.Data = data
_, err = client.ConfigMaps(metav1.NamespaceSystem).Update(existing)
return err
}
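
The tests below expect `null` for nil header slices and `[]` for empty ones. A standalone, stdlib-only sketch (not part of the deleted tree; the function name is reused only for illustration) of the encoding/json behavior behind jsonSerializeStringSlice:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonSerializeStringSlice mirrors the helper above: a nil slice marshals to
// "null", an empty slice to "[]", a populated slice to a JSON array.
func jsonSerializeStringSlice(in []string) (string, error) {
	out, err := json.Marshal(in)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	var nilHeaders []string
	empty := []string{}
	populated := []string{"alfa", "bravo", "charlie"}

	for _, s := range [][]string{nilHeaders, empty, populated} {
		v, _ := jsonSerializeStringSlice(s)
		fmt.Println(v) // prints: null, then [], then ["alfa","bravo","charlie"]
	}
}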


@@ -1,223 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
clienttesting "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)
func TestWriteClientCAs(t *testing.T) {
tests := []struct {
name string
hook ClientCARegistrationHook
preexistingObjs []runtime.Object
expectedConfigMaps map[string]*api.ConfigMap
expectUpdate bool
}{
{
name: "basic",
hook: ClientCARegistrationHook{
ClientCA: []byte("foo"),
RequestHeaderUsernameHeaders: []string{"alfa", "bravo", "charlie"},
RequestHeaderGroupHeaders: []string{"delta"},
RequestHeaderExtraHeaderPrefixes: []string{"echo", "foxtrot"},
RequestHeaderCA: []byte("bar"),
RequestHeaderAllowedNames: []string{"first", "second"},
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"client-ca-file": "foo",
"requestheader-username-headers": `["alfa","bravo","charlie"]`,
"requestheader-group-headers": `["delta"]`,
"requestheader-extra-headers-prefix": `["echo","foxtrot"]`,
"requestheader-client-ca-file": "bar",
"requestheader-allowed-names": `["first","second"]`,
},
},
},
},
{
name: "skip extension-apiserver-authentication",
hook: ClientCARegistrationHook{
RequestHeaderCA: []byte("bar"),
RequestHeaderAllowedNames: []string{"first", "second"},
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"requestheader-username-headers": `null`,
"requestheader-group-headers": `null`,
"requestheader-extra-headers-prefix": `null`,
"requestheader-client-ca-file": "bar",
"requestheader-allowed-names": `["first","second"]`,
},
},
},
},
{
name: "skip extension-apiserver-authentication",
hook: ClientCARegistrationHook{
ClientCA: []byte("foo"),
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"client-ca-file": "foo",
},
},
},
},
{
name: "empty allowed names",
hook: ClientCARegistrationHook{
RequestHeaderCA: []byte("bar"),
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"requestheader-username-headers": `null`,
"requestheader-group-headers": `null`,
"requestheader-extra-headers-prefix": `null`,
"requestheader-client-ca-file": "bar",
"requestheader-allowed-names": `null`,
},
},
},
},
{
name: "overwrite extension-apiserver-authentication",
hook: ClientCARegistrationHook{
ClientCA: []byte("foo"),
},
preexistingObjs: []runtime.Object{
&api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"client-ca-file": "other",
},
},
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"client-ca-file": "foo",
},
},
},
expectUpdate: true,
},
{
name: "overwrite extension-apiserver-authentication requestheader",
hook: ClientCARegistrationHook{
RequestHeaderUsernameHeaders: []string{},
RequestHeaderGroupHeaders: []string{},
RequestHeaderExtraHeaderPrefixes: []string{},
RequestHeaderCA: []byte("bar"),
RequestHeaderAllowedNames: []string{},
},
preexistingObjs: []runtime.Object{
&api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"requestheader-username-headers": `null`,
"requestheader-group-headers": `null`,
"requestheader-extra-headers-prefix": `null`,
"requestheader-client-ca-file": "something",
"requestheader-allowed-names": `null`,
},
},
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"requestheader-username-headers": `[]`,
"requestheader-group-headers": `[]`,
"requestheader-extra-headers-prefix": `[]`,
"requestheader-client-ca-file": "bar",
"requestheader-allowed-names": `[]`,
},
},
},
expectUpdate: true,
},
{
name: "namespace exists",
hook: ClientCARegistrationHook{
ClientCA: []byte("foo"),
},
preexistingObjs: []runtime.Object{
&api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: metav1.NamespaceSystem}},
},
expectedConfigMaps: map[string]*api.ConfigMap{
"extension-apiserver-authentication": {
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "extension-apiserver-authentication"},
Data: map[string]string{
"client-ca-file": "foo",
},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client := fake.NewSimpleClientset(test.preexistingObjs...)
test.hook.tryToWriteClientCAs(client.Core())
actualConfigMaps, updated := getFinalConfiMaps(client)
if !reflect.DeepEqual(test.expectedConfigMaps, actualConfigMaps) {
t.Fatalf("%s: %v", test.name, diff.ObjectReflectDiff(test.expectedConfigMaps, actualConfigMaps))
}
if test.expectUpdate != updated {
t.Fatalf("%s: expected %v, got %v", test.name, test.expectUpdate, updated)
}
})
}
}
func getFinalConfiMaps(client *fake.Clientset) (map[string]*api.ConfigMap, bool) {
ret := map[string]*api.ConfigMap{}
updated := false
for _, action := range client.Actions() {
if action.Matches("create", "configmaps") {
obj := action.(clienttesting.CreateAction).GetObject().(*api.ConfigMap)
ret[obj.Name] = obj
}
if action.Matches("update", "configmaps") {
updated = true
obj := action.(clienttesting.UpdateAction).GetObject().(*api.ConfigMap)
ret[obj.Name] = obj
}
}
return ret, updated
}


@@ -1,42 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
)
func createNamespaceIfNeeded(c coreclient.NamespacesGetter, ns string) error {
if _, err := c.Namespaces().Get(ns, metav1.GetOptions{}); err == nil {
// the namespace already exists
return nil
}
newNs := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
Namespace: "",
},
}
_, err := c.Namespaces().Create(newNs)
if err != nil && errors.IsAlreadyExists(err) {
err = nil
}
return err
}
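
createNamespaceIfNeeded above uses a get-then-create pattern that tolerates losing the create race. A hypothetical, stdlib-only sketch of the same pattern; namespaceStore and errAlreadyExists are invented for illustration:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errAlreadyExists = errors.New("already exists")

// namespaceStore is a stand-in for the namespaces client.
type namespaceStore struct {
	mu    sync.Mutex
	items map[string]struct{}
}

func (s *namespaceStore) Get(name string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.items[name]; !ok {
		return errors.New("not found")
	}
	return nil
}

func (s *namespaceStore) Create(name string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.items[name]; ok {
		return errAlreadyExists
	}
	s.items[name] = struct{}{}
	return nil
}

func createNamespaceIfNeeded(s *namespaceStore, ns string) error {
	if err := s.Get(ns); err == nil {
		return nil // the namespace already exists
	}
	err := s.Create(ns)
	if errors.Is(err, errAlreadyExists) {
		err = nil // another writer won the race; that is fine
	}
	return err
}

func main() {
	store := &namespaceStore{items: map[string]struct{}{}}
	fmt.Println(createNamespaceIfNeeded(store, "kube-system")) // <nil>
	fmt.Println(createNamespaceIfNeeded(store, "kube-system")) // <nil> (idempotent)
}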


@@ -1,285 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"fmt"
"net"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
api "k8s.io/kubernetes/pkg/apis/core"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
corerest "k8s.io/kubernetes/pkg/registry/core/rest"
servicecontroller "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller"
portallocatorcontroller "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller"
"k8s.io/kubernetes/pkg/util/async"
)
const kubernetesServiceName = "kubernetes"
// Controller is the controller manager for the core bootstrap Kubernetes
// controller loops, which manage creating the "kubernetes" service, the
// "default", "kube-system" and "kube-public" namespaces, and provide the IP
// repair check on service IPs
type Controller struct {
ServiceClient coreclient.ServicesGetter
NamespaceClient coreclient.NamespacesGetter
EventClient coreclient.EventsGetter
ServiceClusterIPRegistry rangeallocation.RangeRegistry
ServiceClusterIPInterval time.Duration
ServiceClusterIPRange net.IPNet
ServiceNodePortRegistry rangeallocation.RangeRegistry
ServiceNodePortInterval time.Duration
ServiceNodePortRange utilnet.PortRange
EndpointReconciler reconcilers.EndpointReconciler
EndpointInterval time.Duration
SystemNamespaces []string
SystemNamespacesInterval time.Duration
PublicIP net.IP
// ServiceIP indicates where the kubernetes service will live. It may not be nil.
ServiceIP net.IP
ServicePort int
ExtraServicePorts []api.ServicePort
ExtraEndpointPorts []api.EndpointPort
PublicServicePort int
KubernetesServiceNodePort int
runner *async.Runner
}
// NewBootstrapController returns a controller for watching the core capabilities of the master
func (c *completedConfig) NewBootstrapController(legacyRESTStorage corerest.LegacyRESTStorage, serviceClient coreclient.ServicesGetter, nsClient coreclient.NamespacesGetter, eventClient coreclient.EventsGetter) *Controller {
return &Controller{
ServiceClient: serviceClient,
NamespaceClient: nsClient,
EventClient: eventClient,
EndpointReconciler: c.ExtraConfig.EndpointReconcilerConfig.Reconciler,
EndpointInterval: c.ExtraConfig.EndpointReconcilerConfig.Interval,
SystemNamespaces: []string{metav1.NamespaceSystem, metav1.NamespacePublic},
SystemNamespacesInterval: 1 * time.Minute,
ServiceClusterIPRegistry: legacyRESTStorage.ServiceClusterIPAllocator,
ServiceClusterIPRange: c.ExtraConfig.ServiceIPRange,
ServiceClusterIPInterval: 3 * time.Minute,
ServiceNodePortRegistry: legacyRESTStorage.ServiceNodePortAllocator,
ServiceNodePortRange: c.ExtraConfig.ServiceNodePortRange,
ServiceNodePortInterval: 3 * time.Minute,
PublicIP: c.GenericConfig.PublicAddress,
ServiceIP: c.ExtraConfig.APIServerServiceIP,
ServicePort: c.ExtraConfig.APIServerServicePort,
ExtraServicePorts: c.ExtraConfig.ExtraServicePorts,
ExtraEndpointPorts: c.ExtraConfig.ExtraEndpointPorts,
PublicServicePort: c.GenericConfig.ReadWritePort,
KubernetesServiceNodePort: c.ExtraConfig.KubernetesServiceNodePort,
}
}
func (c *Controller) PostStartHook(hookContext genericapiserver.PostStartHookContext) error {
c.Start()
return nil
}
func (c *Controller) PreShutdownHook() error {
c.Stop()
return nil
}
// Start begins the core controller loops that must exist for bootstrapping
// a cluster.
func (c *Controller) Start() {
if c.runner != nil {
return
}
repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceClient, c.EventClient, &c.ServiceClusterIPRange, c.ServiceClusterIPRegistry)
repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceClient, c.EventClient, c.ServiceNodePortRange, c.ServiceNodePortRegistry)
// run all of the controllers once prior to returning from Start.
if err := repairClusterIPs.RunOnce(); err != nil {
// If we fail to repair cluster IPs apiserver is useless. We should restart and retry.
glog.Fatalf("Unable to perform initial IP allocation check: %v", err)
}
if err := repairNodePorts.RunOnce(); err != nil {
// If we fail to repair node ports apiserver is useless. We should restart and retry.
glog.Fatalf("Unable to perform initial service nodePort check: %v", err)
}
// Service definition is reconciled during first run to correct port and type per expectations.
if err := c.UpdateKubernetesService(true); err != nil {
glog.Errorf("Unable to perform initial Kubernetes service initialization: %v", err)
}
c.runner = async.NewRunner(c.RunKubernetesNamespaces, c.RunKubernetesService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil)
c.runner.Start()
}
func (c *Controller) Stop() {
if c.runner != nil {
c.runner.Stop()
}
endpointPorts := createEndpointPortSpec(c.PublicServicePort, "https", c.ExtraEndpointPorts)
finishedReconciling := make(chan struct{})
go func() {
defer close(finishedReconciling)
glog.Infof("Shutting down kubernetes service endpoint reconciler")
if err := c.EndpointReconciler.StopReconciling("kubernetes", c.PublicIP, endpointPorts); err != nil {
glog.Error(err)
}
}()
select {
case <-finishedReconciling:
// done
case <-time.After(2 * c.EndpointInterval):
// don't block server shutdown forever if we can't reach etcd to remove ourselves
glog.Warning("StopReconciling() timed out")
}
}
// RunKubernetesNamespaces periodically makes sure that all internal namespaces exist
func (c *Controller) RunKubernetesNamespaces(ch chan struct{}) {
wait.Until(func() {
// Loop the system namespace list, and create them if they do not exist
for _, ns := range c.SystemNamespaces {
if err := createNamespaceIfNeeded(c.NamespaceClient, ns); err != nil {
runtime.HandleError(fmt.Errorf("unable to create required kubernetes system namespace %s: %v", ns, err))
}
}
}, c.SystemNamespacesInterval, ch)
}
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
wait.Until(func() {
// The service definition is not reconciled after the first
// run; ports and type are corrected only during start.
if err := c.UpdateKubernetesService(false); err != nil {
runtime.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
}
}, c.EndpointInterval, ch)
}
// UpdateKubernetesService attempts to update the default Kube service.
func (c *Controller) UpdateKubernetesService(reconcile bool) error {
// Update service & endpoint records.
// TODO: when it becomes possible to change this stuff,
// stop polling and start watching.
// TODO: add endpoints of all replicas, not just the elected master.
if err := createNamespaceIfNeeded(c.NamespaceClient, metav1.NamespaceDefault); err != nil {
return err
}
servicePorts, serviceType := createPortAndServiceSpec(c.ServicePort, c.PublicServicePort, c.KubernetesServiceNodePort, "https", c.ExtraServicePorts)
if err := c.CreateOrUpdateMasterServiceIfNeeded(kubernetesServiceName, c.ServiceIP, servicePorts, serviceType, reconcile); err != nil {
return err
}
endpointPorts := createEndpointPortSpec(c.PublicServicePort, "https", c.ExtraEndpointPorts)
if err := c.EndpointReconciler.ReconcileEndpoints(kubernetesServiceName, c.PublicIP, endpointPorts, reconcile); err != nil {
return err
}
return nil
}
// createPortAndServiceSpec creates an array of service ports.
// If the NodePort value is 0, just the servicePort is used; otherwise, a node port is exposed.
func createPortAndServiceSpec(servicePort int, targetServicePort int, nodePort int, servicePortName string, extraServicePorts []api.ServicePort) ([]api.ServicePort, api.ServiceType) {
// Use the Cluster IP type for the service port if NodePort isn't provided.
// Otherwise, we will be binding the master service to a NodePort.
servicePorts := []api.ServicePort{{Protocol: api.ProtocolTCP,
Port: int32(servicePort),
Name: servicePortName,
TargetPort: intstr.FromInt(targetServicePort)}}
serviceType := api.ServiceTypeClusterIP
if nodePort > 0 {
servicePorts[0].NodePort = int32(nodePort)
serviceType = api.ServiceTypeNodePort
}
if extraServicePorts != nil {
servicePorts = append(servicePorts, extraServicePorts...)
}
return servicePorts, serviceType
}
// createEndpointPortSpec creates an array of endpoint ports
func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndpointPorts []api.EndpointPort) []api.EndpointPort {
endpointPorts := []api.EndpointPort{{Protocol: api.ProtocolTCP,
Port: int32(endpointPort),
Name: endpointPortName,
}}
if extraEndpointPorts != nil {
endpointPorts = append(endpointPorts, extraEndpointPorts...)
}
return endpointPorts
}
// CreateOrUpdateMasterServiceIfNeeded will create the specified service if it
// doesn't already exist; when reconcile is true, an existing service is updated as needed.
func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error {
if s, err := c.ServiceClient.Services(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil {
// The service already exists.
if reconcile {
if svc, updated := reconcilers.GetMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
glog.Warningf("Resetting master service %q to %#v", serviceName, svc)
_, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(svc)
return err
}
}
return nil
}
svc := &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{"provider": "kubernetes", "component": "apiserver"},
},
Spec: api.ServiceSpec{
Ports: servicePorts,
// maintained by this code, not by the pod selector
Selector: nil,
ClusterIP: serviceIP.String(),
SessionAffinity: api.ServiceAffinityNone,
Type: serviceType,
},
}
_, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(svc)
if errors.IsAlreadyExists(err) {
return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile)
}
return err
}
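
Controller.Stop above bounds the endpoint-reconciler cleanup with a timeout so shutdown never blocks on an unreachable etcd. A standalone sketch of that select-with-timeout pattern, with stopReconciling standing in for the real reconciler call (assumed names, not from the deleted file):

package main

import (
	"fmt"
	"time"
)

// stopReconciling stands in for removing this apiserver's endpoint on shutdown.
func stopReconciling() error {
	time.Sleep(50 * time.Millisecond)
	return nil
}

func main() {
	endpointInterval := 100 * time.Millisecond
	finished := make(chan struct{})
	go func() {
		defer close(finished)
		if err := stopReconciling(); err != nil {
			fmt.Println("stop error:", err)
		}
	}()

	select {
	case <-finished:
		fmt.Println("reconciler shut down cleanly")
	case <-time.After(2 * endpointInterval):
		// don't block shutdown forever if the backend is unreachable
		fmt.Println("StopReconciling() timed out")
	}
}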


@@ -1,55 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["crdregistration_controller.go"],
importpath = "k8s.io/kubernetes/pkg/master/controller/crdregistration",
deps = [
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["crdregistration_controller_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
],
)


@@ -1,226 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package crdregistration
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
crdinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion"
crdlisters "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
"k8s.io/kubernetes/pkg/controller"
)
// AutoAPIServiceRegistration is an interface which callers can re-declare locally and properly cast to for
// adding and removing APIServices
type AutoAPIServiceRegistration interface {
// AddAPIServiceToSync adds an API service to auto-register.
AddAPIServiceToSync(in *apiregistration.APIService)
// RemoveAPIServiceToSync removes an API service from the set to auto-register.
RemoveAPIServiceToSync(name string)
}
type crdRegistrationController struct {
crdLister crdlisters.CustomResourceDefinitionLister
crdSynced cache.InformerSynced
apiServiceRegistration AutoAPIServiceRegistration
syncHandler func(groupVersion schema.GroupVersion) error
syncedInitialSet chan struct{}
// queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors
// this is actually keyed by a groupVersion
queue workqueue.RateLimitingInterface
}
// NewAutoRegistrationController returns a controller which will register CRD GroupVersions with the auto APIService registration
// controller so they automatically stay in sync.
func NewAutoRegistrationController(crdinformer crdinformers.CustomResourceDefinitionInformer, apiServiceRegistration AutoAPIServiceRegistration) *crdRegistrationController {
c := &crdRegistrationController{
crdLister: crdinformer.Lister(),
crdSynced: crdinformer.Informer().HasSynced,
apiServiceRegistration: apiServiceRegistration,
syncedInitialSet: make(chan struct{}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd-autoregister"),
}
c.syncHandler = c.handleVersionUpdate
crdinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cast := obj.(*apiextensions.CustomResourceDefinition)
c.enqueueCRD(cast)
},
UpdateFunc: func(oldObj, newObj interface{}) {
// Enqueue both old and new object to make sure we remove and add appropriate API services.
// The working queue will resolve any duplicates and only changes will stay in the queue.
c.enqueueCRD(oldObj.(*apiextensions.CustomResourceDefinition))
c.enqueueCRD(newObj.(*apiextensions.CustomResourceDefinition))
},
DeleteFunc: func(obj interface{}) {
cast, ok := obj.(*apiextensions.CustomResourceDefinition)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
return
}
cast, ok = tombstone.Obj.(*apiextensions.CustomResourceDefinition)
if !ok {
glog.V(2).Infof("Tombstone contained unexpected object: %#v", obj)
return
}
}
c.enqueueCRD(cast)
},
})
return c
}
func (c *crdRegistrationController) Run(threadiness int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
// make sure the work queue is shutdown which will trigger workers to end
defer c.queue.ShutDown()
glog.Infof("Starting crd-autoregister controller")
defer glog.Infof("Shutting down crd-autoregister controller")
// wait for your secondary caches to fill before starting your work
if !controller.WaitForCacheSync("crd-autoregister", stopCh, c.crdSynced) {
return
}
// process each item in the list once
if crds, err := c.crdLister.List(labels.Everything()); err != nil {
utilruntime.HandleError(err)
} else {
for _, crd := range crds {
for _, version := range crd.Spec.Versions {
if err := c.syncHandler(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}); err != nil {
utilruntime.HandleError(err)
}
}
}
}
close(c.syncedInitialSet)
// start up your worker threads based on threadiness. Some controllers have multiple kinds of workers
for i := 0; i < threadiness; i++ {
// runWorker will loop until "something bad" happens. The .Until will then rekick the worker
// after one second
go wait.Until(c.runWorker, time.Second, stopCh)
}
// wait until we're told to stop
<-stopCh
}
// WaitForInitialSync blocks until the initial set of CRD resources has been processed
func (c *crdRegistrationController) WaitForInitialSync() {
<-c.syncedInitialSet
}
func (c *crdRegistrationController) runWorker() {
// hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work
// available, so we don't worry about secondary waits
for c.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (c *crdRegistrationController) processNextWorkItem() bool {
// pull the next work item from queue. It should be a key we use to lookup something in a cache
key, quit := c.queue.Get()
if quit {
return false
}
// you always have to indicate to the queue that you've completed a piece of work
defer c.queue.Done(key)
// do your work on the key. This method contains your "do stuff" logic
err := c.syncHandler(key.(schema.GroupVersion))
if err == nil {
// if you had no error, tell the queue to stop tracking history for your key. This will
// reset things like failure counts for per-item rate limiting
c.queue.Forget(key)
return true
}
// there was a failure so be sure to report it. This method allows for pluggable error handling
// which can be used for things like cluster-monitoring
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
// since we failed, we should requeue the item to work on later. This method adds a backoff
// to avoid hotlooping on particular items (they're probably still not going to work right away)
// and to protect the controller overall (everything I've done is broken, this controller needs to
// calm down or it can starve other useful work).
c.queue.AddRateLimited(key)
return true
}
func (c *crdRegistrationController) enqueueCRD(crd *apiextensions.CustomResourceDefinition) {
for _, version := range crd.Spec.Versions {
c.queue.Add(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name})
}
}
func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error {
apiServiceName := groupVersion.Version + "." + groupVersion.Group
// check all CRDs. There shouldn't be that many, but if we have problems later we can index them
crds, err := c.crdLister.List(labels.Everything())
if err != nil {
return err
}
for _, crd := range crds {
if crd.Spec.Group != groupVersion.Group {
continue
}
for _, version := range crd.Spec.Versions {
if version.Name != groupVersion.Version || !version.Served {
continue
}
c.apiServiceRegistration.AddAPIServiceToSync(&apiregistration.APIService{
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistration.APIServiceSpec{
Group: groupVersion.Group,
Version: groupVersion.Version,
GroupPriorityMinimum: 1000, // CRDs should have relatively low priority
VersionPriority: 100, // CRDs will be sorted by kube-like versions like any other APIService with the same VersionPriority
},
})
return nil
}
}
c.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName)
return nil
}
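
processNextWorkItem above follows the standard client-go workqueue loop: Get an item, do the work, Done it, then Forget on success or AddRateLimited on failure. A minimal sketch of that loop against the workqueue API present in this vendored tree; syncOnce and the queue key are made up for illustration:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd-autoregister")
	queue.Add("v1.group.com")
	queue.Add("v1.group.com") // duplicate keys are collapsed while queued

	syncOnce := func(key string) error {
		fmt.Println("syncing", key)
		return nil
	}

	for {
		key, quit := queue.Get()
		if quit {
			return
		}
		func() {
			defer queue.Done(key) // always mark the item as processed
			if err := syncOnce(key.(string)); err != nil {
				queue.AddRateLimited(key) // retry later with backoff
				return
			}
			queue.Forget(key) // success: reset the per-item failure count
		}()
		if queue.Len() == 0 {
			queue.ShutDown() // nothing left for this demo; a real controller keeps running
		}
	}
}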


@@ -1,129 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package crdregistration
import (
"reflect"
"testing"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
crdlisters "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
)
func TestHandleVersionUpdate(t *testing.T) {
tests := []struct {
name string
startingCRDs []*apiextensions.CustomResourceDefinition
version schema.GroupVersion
expectedAdded []*apiregistration.APIService
expectedRemoved []string
}{
{
name: "simple add crd",
startingCRDs: []*apiextensions.CustomResourceDefinition{
{
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "group.com",
// The Version field is deprecated and CRD registration won't rely on it at all.
// The defaulting path fills in the Versions field if the user only provided the Version field.
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
},
},
},
},
},
version: schema.GroupVersion{Group: "group.com", Version: "v1"},
expectedAdded: []*apiregistration.APIService{
{
ObjectMeta: metav1.ObjectMeta{Name: "v1.group.com"},
Spec: apiregistration.APIServiceSpec{
Group: "group.com",
Version: "v1",
GroupPriorityMinimum: 1000,
VersionPriority: 100,
},
},
},
},
{
name: "simple remove crd",
startingCRDs: []*apiextensions.CustomResourceDefinition{
{
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "group.com",
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
},
},
},
},
},
version: schema.GroupVersion{Group: "group.com", Version: "v2"},
expectedRemoved: []string{"v2.group.com"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
registration := &fakeAPIServiceRegistration{}
crdCache := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
crdLister := crdlisters.NewCustomResourceDefinitionLister(crdCache)
c := crdRegistrationController{
crdLister: crdLister,
apiServiceRegistration: registration,
}
for i := range test.startingCRDs {
crdCache.Add(test.startingCRDs[i])
}
c.handleVersionUpdate(test.version)
if !reflect.DeepEqual(test.expectedAdded, registration.added) {
t.Errorf("%s expected %v, got %v", test.name, test.expectedAdded, registration.added)
}
if !reflect.DeepEqual(test.expectedRemoved, registration.removed) {
t.Errorf("%s expected %v, got %v", test.name, test.expectedRemoved, registration.removed)
}
})
}
}
type fakeAPIServiceRegistration struct {
added []*apiregistration.APIService
removed []string
}
func (a *fakeAPIServiceRegistration) AddAPIServiceToSync(in *apiregistration.APIService) {
a.added = append(a.added, in)
}
func (a *fakeAPIServiceRegistration) RemoveAPIServiceToSync(name string) {
a.removed = append(a.removed, name)
}


@@ -1,948 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"net"
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/master/reconcilers"
)
func TestReconcileEndpoints(t *testing.T) {
ns := metav1.NamespaceDefault
om := func(name string) metav1.ObjectMeta {
return metav1.ObjectMeta{Namespace: ns, Name: name}
}
reconcile_tests := []struct {
testName string
serviceName string
ip string
endpointPorts []api.EndpointPort
additionalMasters int
endpoints *api.EndpointsList
expectUpdate *api.Endpoints // nil means none expected
expectCreate *api.Endpoints // nil means none expected
}{
{
testName: "no existing endpoints",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: nil,
expectCreate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
},
{
testName: "existing endpoints satisfy but too many",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "4.3.2.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy but too many + extra masters",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
additionalMasters: 3,
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy but too many + extra masters + delete first",
serviceName: "foo",
ip: "4.3.2.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
additionalMasters: 3,
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy and endpoint addresses length less than master count",
serviceName: "foo",
ip: "4.3.2.2",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
additionalMasters: 3,
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: nil,
},
{
testName: "existing endpoints current IP missing and address length less than master count",
serviceName: "foo",
ip: "4.3.2.2",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
additionalMasters: 3,
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong name",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("bar"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectCreate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong IP",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "4.3.2.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong port",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 9090, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong protocol",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "UDP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong port name",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "baz", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "baz", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints extra service ports satisfy",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
{Name: "baz", Port: 1010, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
{Name: "baz", Port: 1010, Protocol: "TCP"},
},
}},
}},
},
},
{
testName: "existing endpoints extra service ports missing port",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
}},
},
},
}
for _, test := range reconcile_tests {
fakeClient := fake.NewSimpleClientset()
if test.endpoints != nil {
fakeClient = fake.NewSimpleClientset(test.endpoints)
}
reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.Core())
err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
updates := []core.UpdateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() != "update" {
continue
}
updates = append(updates, action.(core.UpdateAction))
}
if test.expectUpdate != nil {
if len(updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, updates)
} else if e, a := test.expectUpdate, updates[0].GetObject(); !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectUpdate == nil && len(updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, updates)
}
creates := []core.CreateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() != "create" {
continue
}
creates = append(creates, action.(core.CreateAction))
}
if test.expectCreate != nil {
if len(creates) != 1 {
t.Errorf("case %q: unexpected creates: %v", test.testName, creates)
} else if e, a := test.expectCreate, creates[0].GetObject(); !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected create:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectCreate == nil && len(creates) > 0 {
t.Errorf("case %q: no create expected, yet saw: %v", test.testName, creates)
}
}
non_reconcile_tests := []struct {
testName string
serviceName string
ip string
endpointPorts []api.EndpointPort
additionalMasters int
endpoints *api.EndpointsList
expectUpdate *api.Endpoints // nil means none expected
expectCreate *api.Endpoints // nil means none expected
}{
{
testName: "existing endpoints extra service ports missing port no update",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: nil,
},
{
testName: "existing endpoints extra service ports, wrong ports, wrong IP",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "4.3.2.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "no existing endpoints",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: nil,
expectCreate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
}
for _, test := range non_reconcile_tests {
fakeClient := fake.NewSimpleClientset()
if test.endpoints != nil {
fakeClient = fake.NewSimpleClientset(test.endpoints)
}
reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.Core())
err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
updates := []core.UpdateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() != "update" {
continue
}
updates = append(updates, action.(core.UpdateAction))
}
if test.expectUpdate != nil {
if len(updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, updates)
} else if e, a := test.expectUpdate, updates[0].GetObject(); !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectUpdate == nil && len(updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, updates)
}
creates := []core.CreateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() != "create" {
continue
}
creates = append(creates, action.(core.CreateAction))
}
if test.expectCreate != nil {
if len(creates) != 1 {
t.Errorf("case %q: unexpected creates: %v", test.testName, creates)
} else if e, a := test.expectCreate, creates[0].GetObject(); !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected create:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectCreate == nil && len(creates) > 0 {
t.Errorf("case %q: no create expected, yet saw: %v", test.testName, creates)
}
}
}
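// reconcileOnce is an illustrative sketch and not part of the original file: it
// shows the single reconcile pass that each table-driven case above drives,
// reusing the fake clientset and reconcilers imports of this test file. The
// service name, IP, and port below are placeholders.
func reconcileOnce() error {
    fakeClient := fake.NewSimpleClientset()
    r := reconcilers.NewMasterCountEndpointReconciler(1, fakeClient.Core())
    ports := []api.EndpointPort{{Name: "https", Port: 6443, Protocol: "TCP"}}
    // reconcilePorts=true lets a stale port list be rewritten along with the IP set,
    // mirroring the first loop above; the second loop passes false.
    return r.ReconcileEndpoints("kubernetes", net.ParseIP("10.0.0.1"), ports, true)
}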
func TestCreateOrUpdateMasterService(t *testing.T) {
ns := metav1.NamespaceDefault
om := func(name string) metav1.ObjectMeta {
return metav1.ObjectMeta{Namespace: ns, Name: name}
}
create_tests := []struct {
testName string
serviceName string
servicePorts []api.ServicePort
serviceType api.ServiceType
expectCreate *api.Service // nil means none expected
}{
{
testName: "service does not exist",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
expectCreate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
}
for _, test := range create_tests {
master := Controller{}
fakeClient := fake.NewSimpleClientset()
master.ServiceClient = fakeClient.Core()
master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false)
creates := []core.CreateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() == "create" {
creates = append(creates, action.(core.CreateAction))
}
}
if test.expectCreate != nil {
if len(creates) != 1 {
t.Errorf("case %q: unexpected creations: %v", test.testName, creates)
} else {
obj := creates[0].GetObject()
if e, a := test.expectCreate.Spec, obj.(*api.Service).Spec; !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected create:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
}
if test.expectCreate == nil && len(creates) > 1 {
t.Errorf("case %q: no create expected, yet saw: %v", test.testName, creates)
}
}
reconcile_tests := []struct {
testName string
serviceName string
servicePorts []api.ServicePort
serviceType api.ServiceType
service *api.Service
expectUpdate *api.Service // nil means none expected
}{
{
testName: "service definition wrong port",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8000, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition missing port",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
{Name: "baz", Port: 1000, Protocol: "TCP", TargetPort: intstr.FromInt(1000)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
{Name: "baz", Port: 1000, Protocol: "TCP", TargetPort: intstr.FromInt(1000)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition incorrect port",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "bar", Port: 1000, Protocol: "UDP", TargetPort: intstr.FromInt(1000)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition incorrect port name",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 1000, Protocol: "UDP", TargetPort: intstr.FromInt(1000)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition incorrect target port",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(1000)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition incorrect protocol",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "UDP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition has incorrect type",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeNodePort,
},
},
expectUpdate: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
},
{
testName: "service definition satisfies",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: nil,
},
}
for _, test := range reconcile_tests {
master := Controller{}
fakeClient := fake.NewSimpleClientset(test.service)
master.ServiceClient = fakeClient.Core()
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, true)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
updates := []core.UpdateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() == "update" {
updates = append(updates, action.(core.UpdateAction))
}
}
if test.expectUpdate != nil {
if len(updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, updates)
} else {
obj := updates[0].GetObject()
if e, a := test.expectUpdate.Spec, obj.(*api.Service).Spec; !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
}
if test.expectUpdate == nil && len(updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, updates)
}
}
non_reconcile_tests := []struct {
testName string
serviceName string
servicePorts []api.ServicePort
serviceType api.ServiceType
service *api.Service
expectUpdate *api.Service // nil means none expected
}{
{
testName: "service definition wrong port, no expected update",
serviceName: "foo",
servicePorts: []api.ServicePort{
{Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
},
serviceType: api.ServiceTypeClusterIP,
service: &api.Service{
ObjectMeta: om("foo"),
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
{Name: "foo", Port: 1000, Protocol: "TCP", TargetPort: intstr.FromInt(1000)},
},
Selector: nil,
ClusterIP: "1.2.3.4",
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
},
},
expectUpdate: nil,
},
}
for _, test := range non_reconcile_tests {
master := Controller{}
fakeClient := fake.NewSimpleClientset(test.service)
master.ServiceClient = fakeClient.Core()
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
updates := []core.UpdateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() == "update" {
updates = append(updates, action.(core.UpdateAction))
}
}
if test.expectUpdate != nil {
if len(updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, updates)
} else {
obj := updates[0].GetObject()
if e, a := test.expectUpdate.Spec, obj.(*api.Service).Spec; !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
}
if test.expectUpdate == nil && len(updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, updates)
}
}
}
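// createOrUpdateSketch is an illustrative sketch and not part of the original
// file. The final boolean mirrors the split between the reconcile and
// non-reconcile tables above: with reconcile=true a drifted Service spec is
// rewritten, with reconcile=false the Service is only created when missing and
// an existing spec is left alone. The name and ports below are placeholders.
func createOrUpdateSketch(c Controller) error {
    ports := []api.ServicePort{
        {Name: "https", Port: 443, Protocol: "TCP", TargetPort: intstr.FromInt(443)},
    }
    return c.CreateOrUpdateMasterServiceIfNeeded("kubernetes", net.ParseIP("10.0.0.1"), ports, api.ServiceTypeClusterIP, true)
}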

View File

@ -1,19 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package master contains code for setting up and running a Kubernetes
// cluster master.
package master // import "k8s.io/kubernetes/pkg/master"

View File

@ -1,40 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
// These imports are the API groups the API server will support.
import (
_ "k8s.io/kubernetes/pkg/apis/admission/install"
_ "k8s.io/kubernetes/pkg/apis/admissionregistration/install"
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/authentication/install"
_ "k8s.io/kubernetes/pkg/apis/authorization/install"
_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/certificates/install"
_ "k8s.io/kubernetes/pkg/apis/componentconfig/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/events/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/imagepolicy/install"
_ "k8s.io/kubernetes/pkg/apis/networking/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/settings/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
)

View File

@ -1,187 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"encoding/json"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
func TestGroupVersions(t *testing.T) {
// legacyUnsuffixedGroups contains the groups released prior to deciding that kubernetes API groups should be dns-suffixed
// new groups should be suffixed with ".k8s.io" (https://github.com/kubernetes/kubernetes/pull/31887#issuecomment-244462396)
legacyUnsuffixedGroups := sets.NewString(
"",
"apps",
"autoscaling",
"batch",
"componentconfig",
"extensions",
"policy",
)
// No new groups should be added to the legacyUnsuffixedGroups exclusion list
if len(legacyUnsuffixedGroups) != 7 {
t.Errorf("No additional unnamespaced groups should be created")
}
for _, gv := range legacyscheme.Scheme.PrioritizedVersionsAllGroups() {
if !strings.HasSuffix(gv.Group, ".k8s.io") && !legacyUnsuffixedGroups.Has(gv.Group) {
t.Errorf("Group %s does not have the standard kubernetes API group suffix of .k8s.io", gv.Group)
}
}
}
func TestTypeTags(t *testing.T) {
for gvk, knownType := range legacyscheme.Scheme.AllKnownTypes() {
if gvk.Version == runtime.APIVersionInternal {
ensureNoTags(t, gvk, knownType, nil)
} else {
ensureTags(t, gvk, knownType, nil)
}
}
}
// These types are registered in external versions, and therefore include json tags,
// but are also registered in internal versions (or referenced from internal types),
// so we explicitly allow tags for them
var typesAllowedTags = map[reflect.Type]bool{
reflect.TypeOf(intstr.IntOrString{}): true,
reflect.TypeOf(metav1.Time{}): true,
reflect.TypeOf(metav1.MicroTime{}): true,
reflect.TypeOf(metav1.Duration{}): true,
reflect.TypeOf(metav1.TypeMeta{}): true,
reflect.TypeOf(metav1.ListMeta{}): true,
reflect.TypeOf(metav1.ObjectMeta{}): true,
reflect.TypeOf(metav1.OwnerReference{}): true,
reflect.TypeOf(metav1.LabelSelector{}): true,
reflect.TypeOf(metav1.GetOptions{}): true,
reflect.TypeOf(metav1.ExportOptions{}): true,
reflect.TypeOf(metav1.ListOptions{}): true,
reflect.TypeOf(metav1.DeleteOptions{}): true,
reflect.TypeOf(metav1.GroupVersionKind{}): true,
reflect.TypeOf(metav1.GroupVersionResource{}): true,
reflect.TypeOf(metav1.Status{}): true,
}
func ensureNoTags(t *testing.T, gvk schema.GroupVersionKind, tp reflect.Type, parents []reflect.Type) {
if _, ok := typesAllowedTags[tp]; ok {
return
}
parents = append(parents, tp)
switch tp.Kind() {
case reflect.Map, reflect.Slice, reflect.Ptr:
ensureNoTags(t, gvk, tp.Elem(), parents)
case reflect.String, reflect.Bool, reflect.Float32, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uintptr, reflect.Uint32, reflect.Uint64, reflect.Interface:
// no-op
case reflect.Struct:
for i := 0; i < tp.NumField(); i++ {
f := tp.Field(i)
if f.PkgPath != "" {
continue // Ignore unexported fields
}
jsonTag := f.Tag.Get("json")
protoTag := f.Tag.Get("protobuf")
if len(jsonTag) > 0 || len(protoTag) > 0 {
t.Errorf("Internal types should not have json or protobuf tags. %#v has tag on field %v: %v", gvk, f.Name, f.Tag)
for i, tp := range parents {
t.Logf("%s%v:", strings.Repeat(" ", i), tp)
}
}
ensureNoTags(t, gvk, f.Type, parents)
}
default:
t.Errorf("Unexpected type %v in %#v", tp.Kind(), gvk)
for i, tp := range parents {
t.Logf("%s%v:", strings.Repeat(" ", i), tp)
}
}
}
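// tagSketch is an illustrative helper and not part of the original file: it
// shows the reflect calls ensureNoTags and ensureTags rely on, applied to a
// locally declared struct instead of a registered API type.
func tagSketch() {
    type external struct {
        Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
    }
    f := reflect.TypeOf(external{}).Field(0)
    _ = f.Tag.Get("json")     // "name"; external types must carry json tags
    _ = f.Tag.Get("protobuf") // non-empty here; internal types must have neither tag
}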
var (
marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
)
// These fields are limited exceptions to the standard JSON naming structure.
// Additions should only be made if a non-standard field name was released and cannot be changed for compatibility reasons.
var allowedNonstandardJSONNames = map[reflect.Type]string{
reflect.TypeOf(v1.DaemonEndpoint{}): "Port",
}
func ensureTags(t *testing.T, gvk schema.GroupVersionKind, tp reflect.Type, parents []reflect.Type) {
// This type handles its own encoding/decoding and doesn't need json tags
if tp.Implements(marshalerType) && (tp.Implements(unmarshalerType) || reflect.PtrTo(tp).Implements(unmarshalerType)) {
return
}
parents = append(parents, tp)
switch tp.Kind() {
case reflect.Map, reflect.Slice, reflect.Ptr:
ensureTags(t, gvk, tp.Elem(), parents)
case reflect.String, reflect.Bool, reflect.Float32, reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uintptr, reflect.Uint32, reflect.Uint64, reflect.Interface:
// no-op
case reflect.Struct:
for i := 0; i < tp.NumField(); i++ {
f := tp.Field(i)
jsonTag := f.Tag.Get("json")
if len(jsonTag) == 0 {
t.Errorf("External types should have json tags. %#v tags on field %v are: %s", gvk, f.Name, f.Tag)
for i, tp := range parents {
t.Logf("%s%v", strings.Repeat(" ", i), tp)
}
}
jsonTagName := strings.Split(jsonTag, ",")[0]
if len(jsonTagName) > 0 && (jsonTagName[0] < 'a' || jsonTagName[0] > 'z') && jsonTagName != "-" && allowedNonstandardJSONNames[tp] != jsonTagName {
t.Errorf("External types should have json names starting with lowercase letter. %#v has json tag on field %v with name %s", gvk, f.Name, jsonTagName)
t.Log(tp)
t.Log(allowedNonstandardJSONNames[tp])
for i, tp := range parents {
t.Logf("%s%v", strings.Repeat(" ", i), tp)
}
}
ensureTags(t, gvk, f.Type, parents)
}
default:
t.Errorf("Unexpected type %v in %#v", tp.Kind(), gvk)
for i, tp := range parents {
t.Logf("%s%v:", strings.Repeat(" ", i), tp)
}
}
}

View File

@ -1,501 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"fmt"
"net"
"net/http"
"reflect"
"strconv"
"time"
admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
authenticationv1 "k8s.io/api/authentication/v1"
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
authorizationapiv1 "k8s.io/api/authorization/v1"
authorizationapiv1beta1 "k8s.io/api/authorization/v1beta1"
autoscalingapiv1 "k8s.io/api/autoscaling/v1"
autoscalingapiv2beta1 "k8s.io/api/autoscaling/v2beta1"
batchapiv1 "k8s.io/api/batch/v1"
batchapiv1beta1 "k8s.io/api/batch/v1beta1"
batchapiv2alpha1 "k8s.io/api/batch/v2alpha1"
certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1"
apiv1 "k8s.io/api/core/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1"
networkingapiv1 "k8s.io/api/networking/v1"
policyapiv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
schedulingapiv1beta1 "k8s.io/api/scheduling/v1beta1"
settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
storageapiv1 "k8s.io/api/storage/v1"
storageapiv1alpha1 "k8s.io/api/storage/v1alpha1"
storageapiv1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apiserver/pkg/endpoints/discovery"
"k8s.io/apiserver/pkg/registry/generic"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/healthz"
serverstorage "k8s.io/apiserver/pkg/server/storage"
storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory"
"k8s.io/client-go/informers"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/master/tunneler"
"k8s.io/kubernetes/pkg/registry/core/endpoint"
endpointsstorage "k8s.io/kubernetes/pkg/registry/core/endpoint/storage"
"k8s.io/kubernetes/pkg/routes"
"k8s.io/kubernetes/pkg/serviceaccount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
// RESTStorage installers
admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest"
appsrest "k8s.io/kubernetes/pkg/registry/apps/rest"
authenticationrest "k8s.io/kubernetes/pkg/registry/authentication/rest"
authorizationrest "k8s.io/kubernetes/pkg/registry/authorization/rest"
autoscalingrest "k8s.io/kubernetes/pkg/registry/autoscaling/rest"
batchrest "k8s.io/kubernetes/pkg/registry/batch/rest"
certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest"
corerest "k8s.io/kubernetes/pkg/registry/core/rest"
eventsrest "k8s.io/kubernetes/pkg/registry/events/rest"
extensionsrest "k8s.io/kubernetes/pkg/registry/extensions/rest"
networkingrest "k8s.io/kubernetes/pkg/registry/networking/rest"
policyrest "k8s.io/kubernetes/pkg/registry/policy/rest"
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
schedulingrest "k8s.io/kubernetes/pkg/registry/scheduling/rest"
settingsrest "k8s.io/kubernetes/pkg/registry/settings/rest"
storagerest "k8s.io/kubernetes/pkg/registry/storage/rest"
)
const (
// DefaultEndpointReconcilerInterval is the default interval at which the endpoints for
// the kubernetes Service are reconciled.
DefaultEndpointReconcilerInterval = 10 * time.Second
// DefaultEndpointReconcilerTTL is the default TTL timeout for the storage layer
DefaultEndpointReconcilerTTL = 15 * time.Second
)
type ExtraConfig struct {
ClientCARegistrationHook ClientCARegistrationHook
APIResourceConfigSource serverstorage.APIResourceConfigSource
StorageFactory serverstorage.StorageFactory
EndpointReconcilerConfig EndpointReconcilerConfig
EventTTL time.Duration
KubeletClientConfig kubeletclient.KubeletClientConfig
// Used to start and monitor tunneling
Tunneler tunneler.Tunneler
EnableLogsSupport bool
ProxyTransport http.RoundTripper
// Values to build the IP addresses used by discovery
// The range of IPs to be assigned to services with type=ClusterIP or greater
ServiceIPRange net.IPNet
// The IP address for the GenericAPIServer service (must be inside ServiceIPRange)
APIServerServiceIP net.IP
// Port for the apiserver service.
APIServerServicePort int
// TODO: we can probably group the service-related items into a substruct to make the
// API server easier to configure; the `Extra*` fields likely fit nicely together.
// The range of ports to be assigned to services with type=NodePort or greater
ServiceNodePortRange utilnet.PortRange
// Additional ports to be exposed on the GenericAPIServer service
// extraServicePorts is injectable in the event that more ports
// (other than the default 443/tcp) are exposed on the GenericAPIServer
// and those ports need to be load balanced by the GenericAPIServer
// service because this pkg is linked by out-of-tree projects
// like openshift which want to use the GenericAPIServer but also do
// more stuff.
ExtraServicePorts []api.ServicePort
// Additional ports to be exposed on the GenericAPIServer endpoints
// Port names should align with ports defined in ExtraServicePorts
ExtraEndpointPorts []api.EndpointPort
// If non-zero, the "kubernetes" services uses this port as NodePort.
KubernetesServiceNodePort int
// Number of masters running; all masters must be started with the
// same value for this field. (Numbers > 1 currently untested.)
MasterCount int
// MasterEndpointReconcileTTL sets the time to live in seconds of an
// endpoint record recorded by each master. The endpoints are checked at an
// interval that is 2/3 of this value and this value defaults to 15s if
// unset. In very large clusters, this value may be increased to reduce the
// possibility that the master endpoint record expires (due to other load
// on the etcd server) and causes masters to drop in and out of the
// kubernetes service record. It is not recommended to set this value below
// 15s.
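// For example, with the default TTL of 15s each master's endpoint record is
// checked roughly every 10s; raising the TTL to 30s stretches that check
// interval to about 20s.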
MasterEndpointReconcileTTL time.Duration
// Selects which reconciler to use
EndpointReconcilerType reconcilers.Type
ServiceAccountIssuer serviceaccount.TokenGenerator
ServiceAccountAPIAudiences []string
}
type Config struct {
GenericConfig *genericapiserver.Config
ExtraConfig ExtraConfig
}
type completedConfig struct {
GenericConfig genericapiserver.CompletedConfig
ExtraConfig *ExtraConfig
}
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}
// EndpointReconcilerConfig holds the endpoint reconciler and endpoint reconciliation interval to be
// used by the master.
type EndpointReconcilerConfig struct {
Reconciler reconcilers.EndpointReconciler
Interval time.Duration
}
// Master contains state for a Kubernetes cluster master/api server.
type Master struct {
GenericAPIServer *genericapiserver.GenericAPIServer
ClientCARegistrationHook ClientCARegistrationHook
}
func (c *Config) createMasterCountReconciler() reconcilers.EndpointReconciler {
endpointClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)
return reconcilers.NewMasterCountEndpointReconciler(c.ExtraConfig.MasterCount, endpointClient)
}
func (c *Config) createNoneReconciler() reconcilers.EndpointReconciler {
return reconcilers.NewNoneEndpointReconciler()
}
func (c *Config) createLeaseReconciler() reconcilers.EndpointReconciler {
ttl := c.ExtraConfig.MasterEndpointReconcileTTL
config, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("apiServerIPInfo"))
if err != nil {
glog.Fatalf("Error determining service IP ranges: %v", err)
}
leaseStorage, _, err := storagefactory.Create(*config)
if err != nil {
glog.Fatalf("Error creating storage factory: %v", err)
}
endpointConfig, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("endpoints"))
if err != nil {
glog.Fatalf("Error getting storage config: %v", err)
}
endpointsStorage := endpointsstorage.NewREST(generic.RESTOptions{
StorageConfig: endpointConfig,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 0,
ResourcePrefix: c.ExtraConfig.StorageFactory.ResourcePrefix(api.Resource("endpoints")),
})
endpointRegistry := endpoint.NewRegistry(endpointsStorage)
masterLeases := reconcilers.NewLeases(leaseStorage, "/masterleases/", ttl)
return reconcilers.NewLeaseEndpointReconciler(endpointRegistry, masterLeases)
}
func (c *Config) createEndpointReconciler() reconcilers.EndpointReconciler {
glog.Infof("Using reconciler: %v", c.ExtraConfig.EndpointReconcilerType)
switch c.ExtraConfig.EndpointReconcilerType {
// there are numerous test dependencies that depend on a default controller
case "", reconcilers.MasterCountReconcilerType:
return c.createMasterCountReconciler()
case reconcilers.LeaseEndpointReconcilerType:
return c.createLeaseReconciler()
case reconcilers.NoneEndpointReconcilerType:
return c.createNoneReconciler()
default:
glog.Fatalf("Reconciler not implemented: %v", c.ExtraConfig.EndpointReconcilerType)
}
return nil
}
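// Selection is driven entirely by ExtraConfig.EndpointReconcilerType. As an
// illustrative sketch (not part of the original file), a lease-based setup on a
// *Config named cfg would look like:
//
//	cfg.ExtraConfig.EndpointReconcilerType = reconcilers.LeaseEndpointReconcilerType
//	cfg.ExtraConfig.MasterEndpointReconcileTTL = 30 * time.Second // optional; Complete() defaults this to 15s
//	reconciler := cfg.createEndpointReconciler()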
// Complete fills in any fields not set that are required to have valid data. It mutates the receiver.
func (cfg *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig {
c := completedConfig{
cfg.GenericConfig.Complete(informers),
&cfg.ExtraConfig,
}
serviceIPRange, apiServerServiceIP, err := DefaultServiceIPRange(c.ExtraConfig.ServiceIPRange)
if err != nil {
glog.Fatalf("Error determining service IP ranges: %v", err)
}
if c.ExtraConfig.ServiceIPRange.IP == nil {
c.ExtraConfig.ServiceIPRange = serviceIPRange
}
if c.ExtraConfig.APIServerServiceIP == nil {
c.ExtraConfig.APIServerServiceIP = apiServerServiceIP
}
discoveryAddresses := discovery.DefaultAddresses{DefaultAddress: c.GenericConfig.ExternalAddress}
discoveryAddresses.CIDRRules = append(discoveryAddresses.CIDRRules,
discovery.CIDRRule{IPRange: c.ExtraConfig.ServiceIPRange, Address: net.JoinHostPort(c.ExtraConfig.APIServerServiceIP.String(), strconv.Itoa(c.ExtraConfig.APIServerServicePort))})
c.GenericConfig.DiscoveryAddresses = discoveryAddresses
if c.ExtraConfig.ServiceNodePortRange.Size == 0 {
// TODO: Currently no way to specify an empty range (do we need to allow this?)
// We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE)
// but then that breaks the strict nestedness of ServiceType.
// Review post-v1
c.ExtraConfig.ServiceNodePortRange = kubeoptions.DefaultServiceNodePortRange
glog.Infof("Node port range unspecified. Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange)
}
if c.ExtraConfig.EndpointReconcilerConfig.Interval == 0 {
c.ExtraConfig.EndpointReconcilerConfig.Interval = DefaultEndpointReconcilerInterval
}
if c.ExtraConfig.MasterEndpointReconcileTTL == 0 {
c.ExtraConfig.MasterEndpointReconcileTTL = DefaultEndpointReconcilerTTL
}
if c.ExtraConfig.EndpointReconcilerConfig.Reconciler == nil {
c.ExtraConfig.EndpointReconcilerConfig.Reconciler = cfg.createEndpointReconciler()
}
return CompletedConfig{&c}
}
// New returns a new instance of Master from the given config.
// Certain config fields will be set to a default value if unset.
// Certain config fields must be specified, including:
// KubeletClientConfig
func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Master, error) {
if reflect.DeepEqual(c.ExtraConfig.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) {
return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig")
}
s, err := c.GenericConfig.New("kube-apiserver", delegationTarget)
if err != nil {
return nil, err
}
if c.ExtraConfig.EnableLogsSupport {
routes.Logs{}.Install(s.Handler.GoRestfulContainer)
}
m := &Master{
GenericAPIServer: s,
}
// install legacy rest storage
if c.ExtraConfig.APIResourceConfigSource.VersionEnabled(apiv1.SchemeGroupVersion) {
legacyRESTStorageProvider := corerest.LegacyRESTStorageProvider{
StorageFactory: c.ExtraConfig.StorageFactory,
ProxyTransport: c.ExtraConfig.ProxyTransport,
KubeletClientConfig: c.ExtraConfig.KubeletClientConfig,
EventTTL: c.ExtraConfig.EventTTL,
ServiceIPRange: c.ExtraConfig.ServiceIPRange,
ServiceNodePortRange: c.ExtraConfig.ServiceNodePortRange,
LoopbackClientConfig: c.GenericConfig.LoopbackClientConfig,
ServiceAccountIssuer: c.ExtraConfig.ServiceAccountIssuer,
ServiceAccountAPIAudiences: c.ExtraConfig.ServiceAccountAPIAudiences,
}
m.InstallLegacyAPI(&c, c.GenericConfig.RESTOptionsGetter, legacyRESTStorageProvider)
}
// The order here is preserved in discovery.
// If resources with identical names exist in more than one of these groups (e.g. "deployments.apps" and "deployments.extensions"),
// the order of this list determines which group an unqualified resource name (e.g. "deployments") should prefer.
// This priority order is used for local discovery, but it ends up aggregated in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go`
// with specific priorities.
// TODO: describe the priority all the way down in the RESTStorageProviders and plumb it back through the various discovery
// handlers that we have.
restStorageProviders := []RESTStorageProvider{
authenticationrest.RESTStorageProvider{Authenticator: c.GenericConfig.Authentication.Authenticator},
authorizationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer, RuleResolver: c.GenericConfig.RuleResolver},
autoscalingrest.RESTStorageProvider{},
batchrest.RESTStorageProvider{},
certificatesrest.RESTStorageProvider{},
extensionsrest.RESTStorageProvider{},
networkingrest.RESTStorageProvider{},
policyrest.RESTStorageProvider{},
rbacrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer},
schedulingrest.RESTStorageProvider{},
settingsrest.RESTStorageProvider{},
storagerest.RESTStorageProvider{},
// keep apps after extensions so legacy clients resolve the extensions versions of shared resource names.
// See https://github.com/kubernetes/kubernetes/issues/42392
appsrest.RESTStorageProvider{},
admissionregistrationrest.RESTStorageProvider{},
eventsrest.RESTStorageProvider{TTL: c.ExtraConfig.EventTTL},
}
m.InstallAPIs(c.ExtraConfig.APIResourceConfigSource, c.GenericConfig.RESTOptionsGetter, restStorageProviders...)
if c.ExtraConfig.Tunneler != nil {
m.installTunneler(c.ExtraConfig.Tunneler, corev1client.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig).Nodes())
}
m.GenericAPIServer.AddPostStartHookOrDie("ca-registration", c.ExtraConfig.ClientCARegistrationHook.PostStartHook)
return m, nil
}
func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic.RESTOptionsGetter, legacyRESTStorageProvider corerest.LegacyRESTStorageProvider) {
legacyRESTStorage, apiGroupInfo, err := legacyRESTStorageProvider.NewLegacyRESTStorage(restOptionsGetter)
if err != nil {
glog.Fatalf("Error building core storage: %v", err)
}
controllerName := "bootstrap-controller"
coreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)
bootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)
m.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)
m.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)
if err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil {
glog.Fatalf("Error in registering group versions: %v", err)
}
}
func (m *Master) installTunneler(nodeTunneler tunneler.Tunneler, nodeClient corev1client.NodeInterface) {
nodeTunneler.Run(nodeAddressProvider{nodeClient}.externalAddresses)
m.GenericAPIServer.AddHealthzChecks(healthz.NamedCheck("SSH Tunnel Check", tunneler.TunnelSyncHealthChecker(nodeTunneler)))
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "apiserver_proxy_tunnel_sync_latency_secs",
Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.",
}, func() float64 { return float64(nodeTunneler.SecondsSinceSync()) })
}
// RESTStorageProvider is a factory type for REST storage.
type RESTStorageProvider interface {
GroupName() string
NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool)
}
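// Illustrative sketch, not part of the original file: a minimal type satisfying
// RESTStorageProvider for a hypothetical group could look like
//
//	type noopRESTStorageProvider struct{}
//
//	func (noopRESTStorageProvider) GroupName() string { return "example.k8s.io" }
//
//	func (noopRESTStorageProvider) NewRESTStorage(_ serverstorage.APIResourceConfigSource, _ generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) {
//		return genericapiserver.APIGroupInfo{}, false // false: nothing enabled, so InstallAPIs skips the group
//	}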
// InstallAPIs will install the APIs for the restStorageProviders if they are enabled.
func (m *Master) InstallAPIs(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter, restStorageProviders ...RESTStorageProvider) {
apiGroupsInfo := []genericapiserver.APIGroupInfo{}
for _, restStorageBuilder := range restStorageProviders {
groupName := restStorageBuilder.GroupName()
if !apiResourceConfigSource.AnyVersionForGroupEnabled(groupName) {
glog.V(1).Infof("Skipping disabled API group %q.", groupName)
continue
}
apiGroupInfo, enabled := restStorageBuilder.NewRESTStorage(apiResourceConfigSource, restOptionsGetter)
if !enabled {
glog.Warningf("Problem initializing API group %q, skipping.", groupName)
continue
}
glog.V(1).Infof("Enabling API group %q.", groupName)
if postHookProvider, ok := restStorageBuilder.(genericapiserver.PostStartHookProvider); ok {
name, hook, err := postHookProvider.PostStartHook()
if err != nil {
glog.Fatalf("Error building PostStartHook: %v", err)
}
m.GenericAPIServer.AddPostStartHookOrDie(name, hook)
}
apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo)
}
for i := range apiGroupsInfo {
if err := m.GenericAPIServer.InstallAPIGroup(&apiGroupsInfo[i]); err != nil {
glog.Fatalf("Error in registering group versions: %v", err)
}
}
}
type nodeAddressProvider struct {
nodeClient corev1client.NodeInterface
}
func (n nodeAddressProvider) externalAddresses() ([]string, error) {
preferredAddressTypes := []apiv1.NodeAddressType{
apiv1.NodeExternalIP,
}
nodes, err := n.nodeClient.List(metav1.ListOptions{})
if err != nil {
return nil, err
}
addrs := []string{}
for ix := range nodes.Items {
node := &nodes.Items[ix]
addr, err := nodeutil.GetPreferredNodeAddress(node, preferredAddressTypes)
if err != nil {
return nil, err
}
addrs = append(addrs, addr)
}
return addrs, nil
}
func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig {
ret := serverstorage.NewResourceConfig()
// NOTE: GroupVersions listed here will be enabled by default. Don't put alpha versions in the list.
ret.EnableVersions(
admissionregistrationv1beta1.SchemeGroupVersion,
apiv1.SchemeGroupVersion,
appsv1beta1.SchemeGroupVersion,
appsv1beta2.SchemeGroupVersion,
appsv1.SchemeGroupVersion,
authenticationv1.SchemeGroupVersion,
authenticationv1beta1.SchemeGroupVersion,
authorizationapiv1.SchemeGroupVersion,
authorizationapiv1beta1.SchemeGroupVersion,
autoscalingapiv1.SchemeGroupVersion,
autoscalingapiv2beta1.SchemeGroupVersion,
batchapiv1.SchemeGroupVersion,
batchapiv1beta1.SchemeGroupVersion,
certificatesapiv1beta1.SchemeGroupVersion,
eventsv1beta1.SchemeGroupVersion,
extensionsapiv1beta1.SchemeGroupVersion,
networkingapiv1.SchemeGroupVersion,
policyapiv1beta1.SchemeGroupVersion,
rbacv1.SchemeGroupVersion,
rbacv1beta1.SchemeGroupVersion,
storageapiv1.SchemeGroupVersion,
storageapiv1beta1.SchemeGroupVersion,
schedulingapiv1beta1.SchemeGroupVersion,
)
// disable alpha versions explicitly so we have a full list of what's possible to serve
ret.DisableVersions(
admissionregistrationv1alpha1.SchemeGroupVersion,
batchapiv2alpha1.SchemeGroupVersion,
rbacv1alpha1.SchemeGroupVersion,
schedulingv1alpha1.SchemeGroupVersion,
settingsv1alpha1.SchemeGroupVersion,
storageapiv1alpha1.SchemeGroupVersion,
)
return ret
}
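// Usage sketch, not part of the original file: callers can layer changes on top
// of the defaults above, for example opting back in to an alpha group/version
// that is disabled by default:
//
//	cfg := DefaultAPIResourceConfigSource()
//	cfg.EnableVersions(batchapiv2alpha1.SchemeGroupVersion)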

View File

@ -1,97 +0,0 @@
// +build !race
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
// This test file is separated from master_test.go so that we can disable the
// race check for it. TestValidOpenAPISpec becomes extremely slow when the -race
// flag is set, and will cause the tests to time out.
import (
"net/http"
"net/http/httptest"
"testing"
openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/kubernetes/pkg/api/legacyscheme"
openapigen "k8s.io/kubernetes/pkg/generated/openapi"
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// TestValidOpenAPISpec verifies that the OpenAPI spec is served
// at the proper endpoint and that the spec is valid.
func TestValidOpenAPISpec(t *testing.T) {
etcdserver, config, sharedInformers, assert := setUp(t)
defer etcdserver.Terminate(t)
config.GenericConfig.EnableIndex = true
config.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapigen.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme))
config.GenericConfig.OpenAPIConfig.Info = &spec.Info{
InfoProps: spec.InfoProps{
Title: "Kubernetes",
Version: "unversioned",
},
}
config.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
master, err := config.Complete(sharedInformers).New(genericapiserver.NewEmptyDelegate())
if err != nil {
t.Fatalf("Error in bringing up the master: %v", err)
}
// make sure swagger.json is not registered before calling PrepareRun.
server := httptest.NewServer(master.GenericAPIServer.Handler.Director)
defer server.Close()
resp, err := http.Get(server.URL + "/swagger.json")
if !assert.NoError(err) {
t.Errorf("unexpected error: %v", err)
}
assert.Equal(http.StatusNotFound, resp.StatusCode)
master.GenericAPIServer.PrepareRun()
resp, err = http.Get(server.URL + "/swagger.json")
if !assert.NoError(err) {
t.Errorf("unexpected error: %v", err)
}
assert.Equal(http.StatusOK, resp.StatusCode)
// as json schema
var sch spec.Schema
if assert.NoError(decodeResponse(resp, &sch)) {
validator := validate.NewSchemaValidator(spec.MustLoadSwagger20Schema(), nil, "", strfmt.Default)
res := validator.Validate(&sch)
assert.NoError(res.AsError())
}
// Validate the OpenAPI spec
doc, err := loads.Spec(server.URL + "/swagger.json")
if assert.NoError(err) {
validator := validate.NewSpecValidator(doc.Schema(), strfmt.Default)
res, warns := validator.Validate(doc)
assert.NoError(res.AsError())
if !warns.IsValid() {
t.Logf("Open API spec on root has some warnings : %v", warns)
}
}
}
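// Note (not part of the original file): because of the "+build !race" constraint
// at the top of this file, this test is compiled out entirely when the race
// detector is enabled, for example under "go test -race ./pkg/master/...".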

View File

@ -1,357 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"context"
"crypto/tls"
"encoding/json"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/version"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/options"
serverstorage "k8s.io/apiserver/pkg/server/storage"
etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/certificates"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apis/storage"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master/reconcilers"
certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest"
corerest "k8s.io/kubernetes/pkg/registry/core/rest"
"k8s.io/kubernetes/pkg/registry/registrytest"
kubeversion "k8s.io/kubernetes/pkg/version"
"github.com/stretchr/testify/assert"
)
// setUp is a convenience function for setting up for (most) tests.
func setUp(t *testing.T) (*etcdtesting.EtcdTestServer, Config, informers.SharedInformerFactory, *assert.Assertions) {
server, storageConfig := etcdtesting.NewUnsecuredEtcd3TestClientServer(t)
config := &Config{
GenericConfig: genericapiserver.NewConfig(legacyscheme.Codecs),
ExtraConfig: ExtraConfig{
APIResourceConfigSource: DefaultAPIResourceConfigSource(),
APIServerServicePort: 443,
MasterCount: 1,
EndpointReconcilerType: reconcilers.MasterCountReconcilerType,
},
}
resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Scheme)
resourceEncoding.SetVersionEncoding(api.GroupName, schema.GroupVersion{Group: "", Version: "v1"}, schema.GroupVersion{Group: api.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetVersionEncoding(autoscaling.GroupName, schema.GroupVersion{Group: "autoscaling", Version: "v1"}, schema.GroupVersion{Group: autoscaling.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetVersionEncoding(batch.GroupName, schema.GroupVersion{Group: "batch", Version: "v1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
// FIXME (soltysh): this GroupVersionResource override should be configurable
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: "batch", Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: "storage.k8s.io", Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetVersionEncoding(apps.GroupName, schema.GroupVersion{Group: "apps", Version: "v1"}, schema.GroupVersion{Group: apps.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetVersionEncoding(extensions.GroupName, schema.GroupVersion{Group: "extensions", Version: "v1beta1"}, schema.GroupVersion{Group: extensions.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetVersionEncoding(rbac.GroupName, schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}, schema.GroupVersion{Group: rbac.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetVersionEncoding(certificates.GroupName, schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, schema.GroupVersion{Group: certificates.GroupName, Version: runtime.APIVersionInternal})
storageFactory := serverstorage.NewDefaultStorageFactory(*storageConfig, testapi.StorageMediaType(), legacyscheme.Codecs, resourceEncoding, DefaultAPIResourceConfigSource(), nil)
etcdOptions := options.NewEtcdOptions(storageConfig)
// unit tests don't need the watch cache, and it leaks lots of goroutines with the etcd testing functions during unit tests
etcdOptions.EnableWatchCache = false
err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, config.GenericConfig)
if err != nil {
t.Fatal(err)
}
kubeVersion := kubeversion.Get()
config.GenericConfig.Version = &kubeVersion
config.ExtraConfig.StorageFactory = storageFactory
config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
config.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
config.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString("/api")
config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
config.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}
config.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil },
TLSClientConfig: &tls.Config{},
})
clientset, err := kubernetes.NewForConfig(config.GenericConfig.LoopbackClientConfig)
if err != nil {
t.Fatalf("unable to create client set due to %v", err)
}
sharedInformers := informers.NewSharedInformerFactory(clientset, config.GenericConfig.LoopbackClientConfig.Timeout)
return server, *config, sharedInformers, assert.New(t)
}
// TestLegacyRestStorageStrategies ensures that all Storage objects which are using the generic registry Store have
// their various strategies properly wired up. This surfaced as a bug where strategies defined Export functions, but
// they were never used outside of unit tests because the export strategies were not assigned inside the Store.
func TestLegacyRestStorageStrategies(t *testing.T) {
_, etcdserver, masterCfg, _ := newMaster(t)
defer etcdserver.Terminate(t)
storageProvider := corerest.LegacyRESTStorageProvider{
StorageFactory: masterCfg.ExtraConfig.StorageFactory,
ProxyTransport: masterCfg.ExtraConfig.ProxyTransport,
KubeletClientConfig: masterCfg.ExtraConfig.KubeletClientConfig,
EventTTL: masterCfg.ExtraConfig.EventTTL,
ServiceIPRange: masterCfg.ExtraConfig.ServiceIPRange,
ServiceNodePortRange: masterCfg.ExtraConfig.ServiceNodePortRange,
LoopbackClientConfig: masterCfg.GenericConfig.LoopbackClientConfig,
}
_, apiGroupInfo, err := storageProvider.NewLegacyRESTStorage(masterCfg.GenericConfig.RESTOptionsGetter)
if err != nil {
t.Errorf("failed to create legacy REST storage: %v", err)
}
// Any new stores with export logic will need to be added here:
exceptions := registrytest.StrategyExceptions{
// Only these stores should have an export strategy defined:
HasExportStrategy: []string{
"secrets",
"limitRanges",
"nodes",
"podTemplates",
},
}
strategyErrors := registrytest.ValidateStorageStrategies(apiGroupInfo.VersionedResourcesStorageMap["v1"], exceptions)
for _, err := range strategyErrors {
t.Error(err)
}
}
func TestCertificatesRestStorageStrategies(t *testing.T) {
_, etcdserver, masterCfg, _ := newMaster(t)
defer etcdserver.Terminate(t)
certStorageProvider := certificatesrest.RESTStorageProvider{}
apiGroupInfo, _ := certStorageProvider.NewRESTStorage(masterCfg.ExtraConfig.APIResourceConfigSource, masterCfg.GenericConfig.RESTOptionsGetter)
exceptions := registrytest.StrategyExceptions{
HasExportStrategy: []string{
"certificatesigningrequests",
},
}
strategyErrors := registrytest.ValidateStorageStrategies(
apiGroupInfo.VersionedResourcesStorageMap[certificatesapiv1beta1.SchemeGroupVersion.Version], exceptions)
for _, err := range strategyErrors {
t.Error(err)
}
}
func newMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
etcdserver, config, sharedInformers, assert := setUp(t)
master, err := config.Complete(sharedInformers).New(genericapiserver.NewEmptyDelegate())
if err != nil {
t.Fatalf("Error in bringing up the master: %v", err)
}
return master, etcdserver, config, assert
}
// TestVersion tests /version
func TestVersion(t *testing.T) {
s, etcdserver, _, _ := newMaster(t)
defer etcdserver.Terminate(t)
req, _ := http.NewRequest("GET", "/version", nil)
resp := httptest.NewRecorder()
s.GenericAPIServer.Handler.ServeHTTP(resp, req)
if resp.Code != 200 {
t.Fatalf("expected http 200, got: %d", resp.Code)
}
var info version.Info
err := json.NewDecoder(resp.Body).Decode(&info)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(kubeversion.Get(), info) {
t.Errorf("Expected %#v, Got %#v", kubeversion.Get(), info)
}
}
type fakeEndpointReconciler struct{}
func (*fakeEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
return nil
}
func makeNodeList(nodes []string, nodeResources apiv1.NodeResources) *apiv1.NodeList {
list := apiv1.NodeList{
Items: make([]apiv1.Node, len(nodes)),
}
for i := range nodes {
list.Items[i].Name = nodes[i]
list.Items[i].Status.Capacity = nodeResources.Capacity
}
return &list
}
// TestGetNodeAddresses verifies that proper results are returned
// when requesting node addresses.
func TestGetNodeAddresses(t *testing.T) {
assert := assert.New(t)
fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).Core().Nodes()
addressProvider := nodeAddressProvider{fakeNodeClient}
// Fail case (no addresses associated with nodes)
nodes, _ := fakeNodeClient.List(metav1.ListOptions{})
addrs, err := addressProvider.externalAddresses()
assert.Error(err, "addresses should have caused an error as there are no addresses.")
assert.Equal([]string(nil), addrs)
// Pass case with External type IP
nodes, _ = fakeNodeClient.List(metav1.ListOptions{})
for index := range nodes.Items {
nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}}
fakeNodeClient.Update(&nodes.Items[index])
}
addrs, err = addressProvider.externalAddresses()
assert.NoError(err, "addresses should not have returned an error.")
assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)
}
func decodeResponse(resp *http.Response, obj interface{}) error {
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if err := json.Unmarshal(data, obj); err != nil {
return err
}
return nil
}
// Because we need to be backwards compatible with release 1.1, endpoints that
// existed in release 1.1 must return responses with an empty APIVersion.
func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
master, etcdserver, _, assert := newMaster(t)
defer etcdserver.Terminate(t)
server := httptest.NewServer(master.GenericAPIServer.Handler.GoRestfulContainer.ServeMux)
// /api exists in release-1.1
resp, err := http.Get(server.URL + "/api")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
apiVersions := metav1.APIVersions{}
assert.NoError(decodeResponse(resp, &apiVersions))
assert.Equal(apiVersions.APIVersion, "")
// /api/v1 exists in release-1.1
resp, err = http.Get(server.URL + "/api/v1")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
resourceList := metav1.APIResourceList{}
assert.NoError(decodeResponse(resp, &resourceList))
assert.Equal(resourceList.APIVersion, "")
// /apis exists in release-1.1
resp, err = http.Get(server.URL + "/apis")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
groupList := metav1.APIGroupList{}
assert.NoError(decodeResponse(resp, &groupList))
assert.Equal(groupList.APIVersion, "")
// /apis/extensions exists in release-1.1
resp, err = http.Get(server.URL + "/apis/extensions")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
group := metav1.APIGroup{}
assert.NoError(decodeResponse(resp, &group))
assert.Equal(group.APIVersion, "")
// /apis/extensions/v1beta1 exists in release-1.1
resp, err = http.Get(server.URL + "/apis/extensions/v1beta1")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
resourceList = metav1.APIResourceList{}
assert.NoError(decodeResponse(resp, &resourceList))
assert.Equal(resourceList.APIVersion, "")
// /apis/autoscaling doesn't exist in release-1.1, so the APIVersion field
// should be non-empty in the results returned by the server.
resp, err = http.Get(server.URL + "/apis/autoscaling")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
group = metav1.APIGroup{}
assert.NoError(decodeResponse(resp, &group))
assert.Equal(group.APIVersion, "v1")
// /apis/autoscaling/v1 doesn't exist in release-1.1, so the APIVersion field
// should be non-empty in the results returned by the server.
resp, err = http.Get(server.URL + "/apis/autoscaling/v1")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
resourceList = metav1.APIResourceList{}
assert.NoError(decodeResponse(resp, &resourceList))
assert.Equal(resourceList.APIVersion, "v1")
}
func TestNoAlphaVersionsEnabledByDefault(t *testing.T) {
config := DefaultAPIResourceConfigSource()
for gv, enable := range config.GroupVersionConfigs {
if enable && strings.Contains(gv.Version, "alpha") {
t.Errorf("Alpha API version %s enabled by default", gv.String())
}
}
}

View File

@ -1,28 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"ports.go",
],
importpath = "k8s.io/kubernetes/pkg/master/ports",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,19 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ports defines ports used by various pieces of the kubernetes
// infrastructure.
package ports // import "k8s.io/kubernetes/pkg/master/ports"

View File

@ -1,44 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ports
const (
// ProxyStatusPort is the default port for the proxy metrics server.
// May be overridden by a flag at startup.
ProxyStatusPort = 10249
// KubeletPort is the default port for the kubelet server on each host machine.
// May be overridden by a flag at startup.
KubeletPort = 10250
// SchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
SchedulerPort = 10251
// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
InsecureKubeControllerManagerPort = 10252
// InsecureCloudControllerManagerPort is the default port for the cloud controller manager server.
// This value may be overridden by a flag at startup.
InsecureCloudControllerManagerPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet
// until heapster can transition to using the SSL endpoint.
// TODO(roberthbailey): Remove this once we have a better solution for heapster.
KubeletReadOnlyPort = 10255
// ProxyHealthzPort is the default port for the proxy healthz server.
// May be overridden by a flag at startup.
ProxyHealthzPort = 10256
)
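// exampleKubeletReadOnlyHostPort is an illustrative sketch, not part of the
// original file: it shows how these defaults are typically combined with a
// host name. It assumes the "net" and "strconv" packages are imported.
func exampleKubeletReadOnlyHostPort(host string) string {
	// e.g. "10.0.0.1:10255"
	return net.JoinHostPort(host, strconv.Itoa(KubeletReadOnlyPort))
}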

View File

@ -1,53 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"lease.go",
"mastercount.go",
"none.go",
"reconcilers.go",
],
importpath = "k8s.io/kubernetes/pkg/master/reconcilers",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/endpoints:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
"//pkg/registry/core/endpoint:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["lease_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/registry/registrytest:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,21 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconcilers provides objects for managing the list of active masters.
// NOTE: The Lease reconciler is not the intended way for any apiserver other
// than kube-apiserver to accomplish the task of Endpoint registration. This is
// a special case for the time being.
package reconcilers

View File

@ -1,290 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconcilers
/*
Original Source:
https://github.com/openshift/origin/blob/bb340c5dd5ff72718be86fb194dedc0faed7f4c7/pkg/cmd/server/election/lease_endpoint_reconciler.go
*/
import (
"fmt"
"net"
"path"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
"k8s.io/kubernetes/pkg/api/endpoints"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/endpoint"
)
// Leases is an interface which assists in managing the set of active masters
type Leases interface {
// ListLeases retrieves a list of the current master IPs
ListLeases() ([]string, error)
// UpdateLease adds or refreshes a master's lease
UpdateLease(ip string) error
// RemoveLease removes a master's lease
RemoveLease(ip string) error
}
type storageLeases struct {
storage storage.Interface
baseKey string
leaseTime time.Duration
}
var _ Leases = &storageLeases{}
// ListLeases retrieves a list of the current master IPs from storage
func (s *storageLeases) ListLeases() ([]string, error) {
ipInfoList := &api.EndpointsList{}
if err := s.storage.List(apirequest.NewDefaultContext(), s.baseKey, "0", storage.Everything, ipInfoList); err != nil {
return nil, err
}
ipList := make([]string, len(ipInfoList.Items))
for i, ip := range ipInfoList.Items {
ipList[i] = ip.Subsets[0].Addresses[0].IP
}
glog.V(6).Infof("Current master IPs listed in storage are %v", ipList)
return ipList, nil
}
// UpdateLease resets the TTL on a master IP in storage
func (s *storageLeases) UpdateLease(ip string) error {
key := path.Join(s.baseKey, ip)
return s.storage.GuaranteedUpdate(apirequest.NewDefaultContext(), key, &api.Endpoints{}, true, nil, func(input kruntime.Object, respMeta storage.ResponseMeta) (kruntime.Object, *uint64, error) {
// just make sure we've got the right IP set, and then refresh the TTL
existing := input.(*api.Endpoints)
existing.Subsets = []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: ip}},
},
}
// leaseTime needs to be in seconds
leaseTime := uint64(s.leaseTime / time.Second)
// NB: GuaranteedUpdate does not perform the store operation unless
// something changed between load and store (not including resource
// version), meaning we can't refresh the TTL without actually
// changing a field.
existing.Generation++
glog.V(6).Infof("Resetting TTL on master IP %q listed in storage to %v", ip, leaseTime)
return existing, &leaseTime, nil
})
}
// RemoveLease removes the lease on a master IP in storage
func (s *storageLeases) RemoveLease(ip string) error {
return s.storage.Delete(apirequest.NewDefaultContext(), s.baseKey+"/"+ip, &api.Endpoints{}, nil)
}
// NewLeases creates a new etcd-based Leases implementation.
func NewLeases(storage storage.Interface, baseKey string, leaseTime time.Duration) Leases {
return &storageLeases{
storage: storage,
baseKey: baseKey,
leaseTime: leaseTime,
}
}
type leaseEndpointReconciler struct {
endpointRegistry endpoint.Registry
masterLeases Leases
stopReconcilingCalled bool
reconcilingLock sync.Mutex
}
// NewLeaseEndpointReconciler creates a new LeaseEndpoint reconciler
func NewLeaseEndpointReconciler(endpointRegistry endpoint.Registry, masterLeases Leases) EndpointReconciler {
return &leaseEndpointReconciler{
endpointRegistry: endpointRegistry,
masterLeases: masterLeases,
stopReconcilingCalled: false,
}
}
// ReconcileEndpoints lists keys in a special etcd directory.
// Each key is expected to have a TTL of R+n, where R is the refresh interval
// at which this function is called, and n is some small value. If an
// apiserver goes down, it will fail to refresh its key's TTL and the key will
// expire. ReconcileEndpoints will notice that the endpoints object is
// different from the directory listing, and update the endpoints object
// accordingly.
func (r *leaseEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
r.reconcilingLock.Lock()
defer r.reconcilingLock.Unlock()
if r.stopReconcilingCalled {
return nil
}
// Refresh the TTL on our key, independently of whether any error or
// update conflict happens below. This makes sure that at least some of
// the masters will add our endpoint.
if err := r.masterLeases.UpdateLease(ip.String()); err != nil {
return err
}
return r.doReconcile(serviceName, endpointPorts, reconcilePorts)
}
func (r *leaseEndpointReconciler) doReconcile(serviceName string, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
ctx := apirequest.NewDefaultContext()
// Retrieve the current list of endpoints...
e, err := r.endpointRegistry.GetEndpoints(ctx, serviceName, &metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return err
}
e = &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: api.NamespaceDefault,
},
}
}
// ... and the list of master IP keys from etcd
masterIPs, err := r.masterLeases.ListLeases()
if err != nil {
return err
}
// Since we just refreshed our own key, assume that zero endpoints
// returned from storage indicates an issue or invalid state, and thus do
// not update the endpoints list based on the result.
if len(masterIPs) == 0 {
return fmt.Errorf("no master IPs were listed in storage, refusing to erase all endpoints for the kubernetes service")
}
// Next, we compare the current list of endpoints with the list of master IP keys
formatCorrect, ipCorrect, portsCorrect := checkEndpointSubsetFormatWithLease(e, masterIPs, endpointPorts, reconcilePorts)
if formatCorrect && ipCorrect && portsCorrect {
return nil
}
if !formatCorrect {
// Something is egregiously wrong, just re-make the endpoints record.
e.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{},
Ports: endpointPorts,
}}
}
if !formatCorrect || !ipCorrect {
// repopulate the addresses according to the expected IPs from etcd
e.Subsets[0].Addresses = make([]api.EndpointAddress, len(masterIPs))
for ind, ip := range masterIPs {
e.Subsets[0].Addresses[ind] = api.EndpointAddress{IP: ip}
}
// Lexicographic order is retained by this step.
e.Subsets = endpoints.RepackSubsets(e.Subsets)
}
if !portsCorrect {
// Reset ports.
e.Subsets[0].Ports = endpointPorts
}
glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, masterIPs)
return r.endpointRegistry.UpdateEndpoints(ctx, e, rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
}
// checkEndpointSubsetFormatWithLease determines if the endpoint is in the
// format ReconcileEndpoints expects when the controller is using leases.
//
// Return values:
// * formatCorrect is true if exactly one subset is found.
// * ipsCorrect is true when the addresses in the endpoints match the expected addresses list.
// * portsCorrect is true when endpoint ports exactly match provided ports.
// portsCorrect is only evaluated when reconcilePorts is set to true.
func checkEndpointSubsetFormatWithLease(e *api.Endpoints, expectedIPs []string, ports []api.EndpointPort, reconcilePorts bool) (formatCorrect bool, ipsCorrect bool, portsCorrect bool) {
if len(e.Subsets) != 1 {
return false, false, false
}
sub := &e.Subsets[0]
portsCorrect = true
if reconcilePorts {
if len(sub.Ports) != len(ports) {
portsCorrect = false
} else {
for i, port := range ports {
if port != sub.Ports[i] {
portsCorrect = false
break
}
}
}
}
ipsCorrect = true
if len(sub.Addresses) != len(expectedIPs) {
ipsCorrect = false
} else {
// check the actual content of the addresses
// present addrs is used as a set (the keys) and to indicate if a
// value was already found (the values)
presentAddrs := make(map[string]bool, len(expectedIPs))
for _, ip := range expectedIPs {
presentAddrs[ip] = false
}
// uniqueness is assumed amongst all Addresses.
for _, addr := range sub.Addresses {
if alreadySeen, ok := presentAddrs[addr.IP]; alreadySeen || !ok {
ipsCorrect = false
break
}
presentAddrs[addr.IP] = true
}
}
return true, ipsCorrect, portsCorrect
}
func (r *leaseEndpointReconciler) StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error {
r.reconcilingLock.Lock()
defer r.reconcilingLock.Unlock()
r.stopReconcilingCalled = true
if err := r.masterLeases.RemoveLease(ip.String()); err != nil {
return err
}
return r.doReconcile(serviceName, endpointPorts, true)
}
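// exampleLeaseReconcilerWiring is an illustrative sketch, not part of the
// original file: it shows how the pieces above are typically combined by an
// apiserver. The base key, port and intervals are placeholder values;
// leaseStorage, endpointRegistry, publicIP and stopCh come from the caller,
// and "k8s.io/apimachinery/pkg/util/wait" is an assumed import.
func exampleLeaseReconcilerWiring(leaseStorage storage.Interface, endpointRegistry endpoint.Registry, publicIP net.IP, stopCh <-chan struct{}) {
	leases := NewLeases(leaseStorage, "/masterleases/", 15*time.Second)
	reconciler := NewLeaseEndpointReconciler(endpointRegistry, leases)
	ports := []api.EndpointPort{{Name: "https", Port: 443, Protocol: "TCP"}}
	// Refresh our own lease and rewrite the "kubernetes" endpoints on every
	// tick; leases of crashed masters expire and drop out of the listing.
	wait.Until(func() {
		if err := reconciler.ReconcileEndpoints("kubernetes", publicIP, ports, true); err != nil {
			glog.Errorf("unable to reconcile kubernetes endpoints: %v", err)
		}
	}, 10*time.Second, stopCh)
}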

View File

@ -1,637 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconcilers
/*
Original Source:
https://github.com/openshift/origin/blob/bb340c5dd5ff72718be86fb194dedc0faed7f4c7/pkg/cmd/server/election/lease_endpoint_reconciler_test.go
*/
import (
"net"
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/registrytest"
)
type fakeLeases struct {
keys map[string]bool
}
var _ Leases = &fakeLeases{}
func newFakeLeases() *fakeLeases {
return &fakeLeases{make(map[string]bool)}
}
func (f *fakeLeases) ListLeases() ([]string, error) {
res := make([]string, 0, len(f.keys))
for ip := range f.keys {
res = append(res, ip)
}
return res, nil
}
func (f *fakeLeases) UpdateLease(ip string) error {
f.keys[ip] = true
return nil
}
func (f *fakeLeases) RemoveLease(ip string) error {
delete(f.keys, ip)
return nil
}
func (f *fakeLeases) SetKeys(keys []string) {
for _, ip := range keys {
f.keys[ip] = false
}
}
func (f *fakeLeases) GetUpdatedKeys() []string {
res := []string{}
for ip, updated := range f.keys {
if updated {
res = append(res, ip)
}
}
return res
}
func TestLeaseEndpointReconciler(t *testing.T) {
ns := api.NamespaceDefault
om := func(name string) metav1.ObjectMeta {
return metav1.ObjectMeta{Namespace: ns, Name: name}
}
reconcileTests := []struct {
testName string
serviceName string
ip string
endpointPorts []api.EndpointPort
endpointKeys []string
endpoints *api.EndpointsList
expectUpdate *api.Endpoints // nil means none expected
}{
{
testName: "no existing endpoints",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: nil,
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
},
{
testName: "existing endpoints satisfy + refresh existing key",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpointKeys: []string{"1.2.3.4"},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
},
{
testName: "existing endpoints satisfy but too many",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "4.3.2.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy but too many + extra masters",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpointKeys: []string{"1.2.3.4", "4.3.2.2", "4.3.2.3", "4.3.2.4"},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints satisfy but too many + extra masters + delete first",
serviceName: "foo",
ip: "4.3.2.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpointKeys: []string{"4.3.2.1", "4.3.2.2", "4.3.2.3", "4.3.2.4"},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints current IP missing",
serviceName: "foo",
ip: "4.3.2.2",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpointKeys: []string{"4.3.2.1"},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.1"},
{IP: "4.3.2.2"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong name",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("bar"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong IP",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "4.3.2.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong port",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 9090, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong protocol",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "UDP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints wrong port name",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "baz", Port: 8080, Protocol: "TCP"}},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "baz", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "existing endpoints extra service ports satisfy",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
{Name: "baz", Port: 1010, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
{Name: "baz", Port: 1010, Protocol: "TCP"},
},
}},
}},
},
},
{
testName: "existing endpoints extra service ports missing port",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
}},
},
},
}
for _, test := range reconcileTests {
fakeLeases := newFakeLeases()
fakeLeases.SetKeys(test.endpointKeys)
registry := &registrytest.EndpointRegistry{
Endpoints: test.endpoints,
}
r := NewLeaseEndpointReconciler(registry, fakeLeases)
err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
if test.expectUpdate != nil {
if len(registry.Updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates)
} else if e, a := test.expectUpdate, &registry.Updates[0]; !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectUpdate == nil && len(registry.Updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates)
}
if updatedKeys := fakeLeases.GetUpdatedKeys(); len(updatedKeys) != 1 || updatedKeys[0] != test.ip {
t.Errorf("case %q: expected the master's IP to be refreshed, but the following IPs were refreshed instead: %v", test.testName, updatedKeys)
}
}
nonReconcileTests := []struct {
testName string
serviceName string
ip string
endpointPorts []api.EndpointPort
endpointKeys []string
endpoints *api.EndpointsList
expectUpdate *api.Endpoints // nil means none expected
}{
{
testName: "existing endpoints extra service ports missing port no update",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: nil,
},
{
testName: "existing endpoints extra service ports, wrong ports, wrong IP",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
{Name: "bar", Port: 1000, Protocol: "TCP"},
},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "4.3.2.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "no existing endpoints",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpoints: nil,
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
}
for _, test := range nonReconcileTests {
t.Run(test.testName, func(t *testing.T) {
fakeLeases := newFakeLeases()
fakeLeases.SetKeys(test.endpointKeys)
registry := &registrytest.EndpointRegistry{
Endpoints: test.endpoints,
}
r := NewLeaseEndpointReconciler(registry, fakeLeases)
err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
if test.expectUpdate != nil {
if len(registry.Updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates)
} else if e, a := test.expectUpdate, &registry.Updates[0]; !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectUpdate == nil && len(registry.Updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates)
}
if updatedKeys := fakeLeases.GetUpdatedKeys(); len(updatedKeys) != 1 || updatedKeys[0] != test.ip {
t.Errorf("case %q: expected the master's IP to be refreshed, but the following IPs were refreshed instead: %v", test.testName, updatedKeys)
}
})
}
}
func TestLeaseStopReconciling(t *testing.T) {
ns := api.NamespaceDefault
om := func(name string) metav1.ObjectMeta {
return metav1.ObjectMeta{Namespace: ns, Name: name}
}
stopTests := []struct {
testName string
serviceName string
ip string
endpointPorts []api.EndpointPort
endpointKeys []string
endpoints *api.EndpointsList
expectUpdate *api.Endpoints // nil means none expected
}{
{
testName: "successful stop reconciling",
serviceName: "foo",
ip: "1.2.3.4",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpointKeys: []string{"1.2.3.4", "4.3.2.2", "4.3.2.3", "4.3.2.4"},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
expectUpdate: &api.Endpoints{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
},
},
{
testName: "stop reconciling with ip not in endpoint ip list",
serviceName: "foo",
ip: "5.6.7.8",
endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
endpointKeys: []string{"1.2.3.4", "4.3.2.2", "4.3.2.3", "4.3.2.4"},
endpoints: &api.EndpointsList{
Items: []api.Endpoints{{
ObjectMeta: om("foo"),
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.2"},
{IP: "4.3.2.3"},
{IP: "4.3.2.4"},
},
Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}},
}},
}},
},
},
}
for _, test := range stopTests {
t.Run(test.testName, func(t *testing.T) {
fakeLeases := newFakeLeases()
fakeLeases.SetKeys(test.endpointKeys)
registry := &registrytest.EndpointRegistry{
Endpoints: test.endpoints,
}
r := NewLeaseEndpointReconciler(registry, fakeLeases)
err := r.StopReconciling(test.serviceName, net.ParseIP(test.ip), test.endpointPorts)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
if test.expectUpdate != nil {
if len(registry.Updates) != 1 {
t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates)
} else if e, a := test.expectUpdate, &registry.Updates[0]; !reflect.DeepEqual(e, a) {
t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a)
}
}
if test.expectUpdate == nil && len(registry.Updates) > 0 {
t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates)
}
for _, key := range fakeLeases.GetUpdatedKeys() {
if key == test.ip {
t.Errorf("case %q: Found ip %s in leases but shouldn't be there", test.testName, key)
}
}
})
}
}

View File

@ -1,245 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconcilers provides the master-count based endpoint reconciler.
package reconcilers
import (
"net"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/pkg/api/endpoints"
api "k8s.io/kubernetes/pkg/apis/core"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
)
// masterCountEndpointReconciler reconciles endpoints based on a specified expected number of
// masters. masterCountEndpointReconciler implements EndpointReconciler.
type masterCountEndpointReconciler struct {
masterCount int
endpointClient coreclient.EndpointsGetter
stopReconcilingCalled bool
reconcilingLock sync.Mutex
}
// NewMasterCountEndpointReconciler creates a new EndpointReconciler that reconciles based on a
// specified expected number of masters.
func NewMasterCountEndpointReconciler(masterCount int, endpointClient coreclient.EndpointsGetter) EndpointReconciler {
return &masterCountEndpointReconciler{
masterCount: masterCount,
endpointClient: endpointClient,
}
}
// ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw).
// ReconcileEndpoints expects that the endpoints objects it manages will all be
// managed only by ReconcileEndpoints; therefore, to understand this, you need only
// understand the requirements and the body of this function.
//
// Requirements:
// * All apiservers MUST use the same ports for their {rw, ro} services.
// * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the
// endpoints for their {rw, ro} services.
// * All apiservers MUST know and agree on the number of apiservers expected
// to be running (c.masterCount).
// * ReconcileEndpoints is called periodically from all apiservers.
func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
r.reconcilingLock.Lock()
defer r.reconcilingLock.Unlock()
if r.stopReconcilingCalled {
return nil
}
e, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{})
if err != nil {
e = &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: metav1.NamespaceDefault,
},
}
}
if errors.IsNotFound(err) {
// Simply create non-existing endpoints for the service.
e.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: ip.String()}},
Ports: endpointPorts,
}}
_, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Create(e)
return err
}
// First, determine if the endpoint is in the format we expect (one
// subset, ports matching endpointPorts, N IP addresses).
formatCorrect, ipCorrect, portsCorrect := checkEndpointSubsetFormat(e, ip.String(), endpointPorts, r.masterCount, reconcilePorts)
if !formatCorrect {
// Something is egregiously wrong, just re-make the endpoints record.
e.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: ip.String()}},
Ports: endpointPorts,
}}
glog.Warningf("Resetting endpoints for master service %q to %#v", serviceName, e)
_, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e)
return err
}
if ipCorrect && portsCorrect {
return nil
}
if !ipCorrect {
// We *always* add our own IP address.
e.Subsets[0].Addresses = append(e.Subsets[0].Addresses, api.EndpointAddress{IP: ip.String()})
// Lexicographic order is retained by this step.
e.Subsets = endpoints.RepackSubsets(e.Subsets)
// If too many IP addresses, remove the ones lexicographically after our
// own IP address. Given the requirements stated at the top of
// this function, this should cause the list of IP addresses to
// become eventually correct.
if addrs := &e.Subsets[0].Addresses; len(*addrs) > r.masterCount {
// addrs is a pointer because we're going to mutate it.
for i, addr := range *addrs {
if addr.IP == ip.String() {
for len(*addrs) > r.masterCount {
// wrap around if necessary.
remove := (i + 1) % len(*addrs)
*addrs = append((*addrs)[:remove], (*addrs)[remove+1:]...)
}
break
}
}
}
}
if !portsCorrect {
// Reset ports.
e.Subsets[0].Ports = endpointPorts
}
glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e)
_, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e)
return err
}
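// exampleMasterCountWiring is an illustrative sketch, not part of the original
// file: it shows the intended call pattern. The endpoint client, public IP and
// the expected master count of 3 are placeholders supplied by the caller.
func exampleMasterCountWiring(endpointClient coreclient.EndpointsGetter, publicIP net.IP) {
	// Every apiserver must agree on the expected number of masters (here 3).
	reconciler := NewMasterCountEndpointReconciler(3, endpointClient)
	ports := []api.EndpointPort{{Name: "https", Port: 443, Protocol: "TCP"}}
	// Called periodically while the apiserver is running ...
	if err := reconciler.ReconcileEndpoints("kubernetes", publicIP, ports, true); err != nil {
		glog.Errorf("unable to reconcile kubernetes endpoints: %v", err)
	}
	// ... and once on graceful shutdown to drop our own IP from the list.
	if err := reconciler.StopReconciling("kubernetes", publicIP, ports); err != nil {
		glog.Errorf("unable to remove endpoint on shutdown: %v", err)
	}
}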
func (r *masterCountEndpointReconciler) StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error {
r.reconcilingLock.Lock()
defer r.reconcilingLock.Unlock()
r.stopReconcilingCalled = true
e, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
// Endpoint doesn't exist
return nil
}
return err
}
// Remove our IP from the list of addresses
new := []api.EndpointAddress{}
for _, addr := range e.Subsets[0].Addresses {
if addr.IP != ip.String() {
new = append(new, addr)
}
}
e.Subsets[0].Addresses = new
e.Subsets = endpoints.RepackSubsets(e.Subsets)
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e)
return err
})
return err
}
// Determine if the endpoint is in the format ReconcileEndpoints expects.
//
// Return values:
// * formatCorrect is true if exactly one subset is found.
// * ipCorrect is true when current master's IP is found and the number
// of addresses is less than or equal to the master count.
// * portsCorrect is true when endpoint ports exactly match provided ports.
// portsCorrect is only evaluated when reconcilePorts is set to true.
func checkEndpointSubsetFormat(e *api.Endpoints, ip string, ports []api.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) {
if len(e.Subsets) != 1 {
return false, false, false
}
sub := &e.Subsets[0]
portsCorrect = true
if reconcilePorts {
if len(sub.Ports) != len(ports) {
portsCorrect = false
}
for i, port := range ports {
if len(sub.Ports) <= i || port != sub.Ports[i] {
portsCorrect = false
break
}
}
}
for _, addr := range sub.Addresses {
if addr.IP == ip {
ipCorrect = len(sub.Addresses) <= count
break
}
}
return true, ipCorrect, portsCorrect
}
// GetMasterServiceUpdateIfNeeded sets service attributes for the
// given apiserver service.
// * GetMasterServiceUpdateIfNeeded expects that the service object it
// manages will be managed only by GetMasterServiceUpdateIfNeeded;
// therefore, to understand this, you need only understand the
// requirements and the body of this function.
// * GetMasterServiceUpdateIfNeeded ensures that the correct ports are set.
//
// Requirements:
// * All apiservers MUST use GetMasterServiceUpdateIfNeeded and only
// GetMasterServiceUpdateIfNeeded to manage service attributes
// * updateMasterService is called periodically from all apiservers.
func GetMasterServiceUpdateIfNeeded(svc *api.Service, servicePorts []api.ServicePort, serviceType api.ServiceType) (s *api.Service, updated bool) {
// Determine if the service is in the format we expect
// (servicePorts are present and service type matches)
formatCorrect := checkServiceFormat(svc, servicePorts, serviceType)
if formatCorrect {
return svc, false
}
svc.Spec.Ports = servicePorts
svc.Spec.Type = serviceType
return svc, true
}
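// exampleEnsureMasterService is an illustrative sketch, not part of the
// original file: it shows the intended call pattern for
// GetMasterServiceUpdateIfNeeded. The service client and port list are
// placeholders supplied by the caller.
func exampleEnsureMasterService(serviceClient coreclient.ServicesGetter, servicePorts []api.ServicePort) error {
	svc, err := serviceClient.Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Only issue an update when the ports or service type actually drifted.
	if updated, needsUpdate := GetMasterServiceUpdateIfNeeded(svc, servicePorts, api.ServiceTypeClusterIP); needsUpdate {
		_, err = serviceClient.Services(metav1.NamespaceDefault).Update(updated)
	}
	return err
}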
// Determine if the service is in the correct format
// GetMasterServiceUpdateIfNeeded expects (servicePorts are correct
// and service type matches).
func checkServiceFormat(s *api.Service, ports []api.ServicePort, serviceType api.ServiceType) (formatCorrect bool) {
if s.Spec.Type != serviceType {
return false
}
if len(ports) != len(s.Spec.Ports) {
return false
}
for i, port := range ports {
if port != s.Spec.Ports[i] {
return false
}
}
return true
}

View File

@ -1,42 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconcilers provides a no-op endpoint reconciler.
package reconcilers
import (
api "k8s.io/kubernetes/pkg/apis/core"
"net"
)
// noneEndpointReconciler allows the endpoint reconciler to be disabled.
type noneEndpointReconciler struct{}
// NewNoneEndpointReconciler creates a new EndpointReconciler that reconciles nothing. It is a no-op.
func NewNoneEndpointReconciler() EndpointReconciler {
return &noneEndpointReconciler{}
}
// ReconcileEndpoints is a no-op.
func (r *noneEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
return nil
}
// StopReconciling is a no-op.
func (r *noneEndpointReconciler) StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error {
return nil
}

View File

@ -1,70 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconcilers provides the endpoint reconcilers used by the apiserver.
package reconcilers
import (
api "k8s.io/kubernetes/pkg/apis/core"
"net"
)
// EndpointReconciler knows how to reconcile the endpoints for the apiserver service.
type EndpointReconciler interface {
// ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw).
// ReconcileEndpoints expects that the endpoints objects it manages will all be
// managed only by ReconcileEndpoints; therefore, to understand this, you need only
// understand the requirements.
//
// Requirements:
// * All apiservers MUST use the same ports for their {rw, ro} services.
// * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the
// endpoints for their {rw, ro} services.
// * ReconcileEndpoints is called periodically from all apiservers.
ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error
StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error
}
// Type identifies an endpoint reconciler implementation.
type Type string
const (
// MasterCountReconcilerType will select the original reconciler
MasterCountReconcilerType Type = "master-count"
// LeaseEndpointReconcilerType will select a storage based reconciler
LeaseEndpointReconcilerType = "lease"
// NoneEndpointReconcilerType will turn off the endpoint reconciler
NoneEndpointReconcilerType = "none"
)
// Types is a slice of reconciler types.
type Types []Type
// AllTypes lists all available reconciler types.
var AllTypes = Types{
MasterCountReconcilerType,
LeaseEndpointReconcilerType,
NoneEndpointReconcilerType,
}
// Names returns a slice of all the reconciler names
func (t Types) Names() []string {
strs := make([]string, len(t))
for i, v := range t {
strs[i] = string(v)
}
return strs
}
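// exampleSelectReconciler is an illustrative sketch, not part of the original
// file: the real selection happens in the master configuration, but the Type
// constants above are intended to be consumed roughly like this. The
// coreclient and endpoint aliases match the imports used by the sibling files
// in this package and are assumptions here.
func exampleSelectReconciler(t Type, masterCount int, endpointClient coreclient.EndpointsGetter, endpointRegistry endpoint.Registry, leases Leases) EndpointReconciler {
	switch t {
	case MasterCountReconcilerType:
		return NewMasterCountEndpointReconciler(masterCount, endpointClient)
	case LeaseEndpointReconcilerType:
		return NewLeaseEndpointReconciler(endpointRegistry, leases)
	case NoneEndpointReconcilerType:
		return NewNoneEndpointReconciler()
	default:
		// Unknown values fall back to the original master-count behaviour.
		return NewMasterCountEndpointReconciler(masterCount, endpointClient)
	}
}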

View File

@ -1,48 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"fmt"
"net"
"github.com/golang/glog"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
// DefaultServiceIPRange takes the serviceIPRange flag and returns the defaulted service IP range (if needed),
// the API server service IP, and an error.
func DefaultServiceIPRange(passedServiceClusterIPRange net.IPNet) (net.IPNet, net.IP, error) {
serviceClusterIPRange := passedServiceClusterIPRange
if passedServiceClusterIPRange.IP == nil {
glog.Infof("Network range for service cluster IPs is unspecified. Defaulting to %v.", kubeoptions.DefaultServiceIPCIDR)
serviceClusterIPRange = kubeoptions.DefaultServiceIPCIDR
}
if size := ipallocator.RangeSize(&serviceClusterIPRange); size < 8 {
return net.IPNet{}, net.IP{}, fmt.Errorf("The service cluster IP range must be at least %d IP addresses", 8)
}
// Select the first valid IP from ServiceClusterIPRange to use as the GenericAPIServer service IP.
apiServerServiceIP, err := ipallocator.GetIndexedIP(&serviceClusterIPRange, 1)
if err != nil {
return net.IPNet{}, net.IP{}, err
}
glog.V(4).Infof("Setting service IP to %q (read-write).", apiServerServiceIP)
return serviceClusterIPRange, apiServerServiceIP, nil
}
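// exampleServiceIPDefaults is an illustrative sketch, not part of the original
// file: for 10.0.0.0/24 the returned API server service IP is the first
// usable address in the range, 10.0.0.1. The CIDR below is a placeholder.
func exampleServiceIPDefaults() error {
	_, cidr, err := net.ParseCIDR("10.0.0.0/24")
	if err != nil {
		return err
	}
	serviceIPRange, apiServerServiceIP, err := DefaultServiceIPRange(*cidr)
	if err != nil {
		return err
	}
	glog.Infof("service cluster IP range %v, apiserver service IP %v", serviceIPRange.String(), apiServerServiceIP)
	return nil
}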

View File

@ -1,44 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["ssh_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["ssh.go"],
importpath = "k8s.io/kubernetes/pkg/master/tunneler",
deps = [
"//pkg/ssh:go_default_library",
"//pkg/util/file:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,234 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tunneler
import (
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"sync/atomic"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/ssh"
utilfile "k8s.io/kubernetes/pkg/util/file"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
)
type InstallSSHKey func(ctx context.Context, user string, data []byte) error
type AddressFunc func() (addresses []string, err error)
type Tunneler interface {
Run(AddressFunc)
Stop()
Dial(ctx context.Context, net, addr string) (net.Conn, error)
SecondsSinceSync() int64
SecondsSinceSSHKeySync() int64
}
// TunnelSyncHealthChecker returns a health func that indicates if a tunneler is healthy.
// It's compatible with healthz.NamedCheck
func TunnelSyncHealthChecker(tunneler Tunneler) func(req *http.Request) error {
return func(req *http.Request) error {
if tunneler == nil {
return nil
}
lag := tunneler.SecondsSinceSync()
if lag > 600 {
return fmt.Errorf("Tunnel sync is taking too long: %d", lag)
}
sshKeyLag := tunneler.SecondsSinceSSHKeySync()
// Since we are syncing ssh-keys every 5 minutes, the allowed
// lag since last sync should be more than 2x higher than that
// to allow for single failure, which can always happen.
// For now set it to 3x, which is 15 minutes.
// For more details see: http://pr.k8s.io/59347
if sshKeyLag > 900 {
return fmt.Errorf("SSHKey sync is taking too long: %d", sshKeyLag)
}
return nil
}
}
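// exampleTunnelHealthCheck is a hedged sketch, not part of the original file:
// it wraps the check above as a named healthz check. It assumes
// "k8s.io/apiserver/pkg/server/healthz" is imported; how the resulting checker
// is registered depends on the server configuration and is not shown here.
func exampleTunnelHealthCheck(t Tunneler) healthz.HealthzChecker {
	// Reports unhealthy once tunnel or SSH-key syncs fall too far behind.
	return healthz.NamedCheck("SSH Tunnel Check", TunnelSyncHealthChecker(t))
}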
type SSHTunneler struct {
// Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
lastSync int64 // Seconds since Epoch
lastSSHKeySync int64 // Seconds since Epoch
SSHUser string
SSHKeyfile string
InstallSSHKey InstallSSHKey
HealthCheckURL *url.URL
tunnels *ssh.SSHTunnelList
lastSyncMetric prometheus.GaugeFunc
clock clock.Clock
getAddresses AddressFunc
stopChan chan struct{}
}
func New(sshUser, sshKeyfile string, healthCheckURL *url.URL, installSSHKey InstallSSHKey) Tunneler {
return &SSHTunneler{
SSHUser: sshUser,
SSHKeyfile: sshKeyfile,
InstallSSHKey: installSSHKey,
HealthCheckURL: healthCheckURL,
clock: clock.RealClock{},
}
}
// Run establishes tunnel loops and returns
func (c *SSHTunneler) Run(getAddresses AddressFunc) {
if c.stopChan != nil {
return
}
c.stopChan = make(chan struct{})
// Save the address getter
if getAddresses != nil {
c.getAddresses = getAddresses
}
// Usernames are capped @ 32
if len(c.SSHUser) > 32 {
glog.Warning("SSH User is too long, truncating to 32 chars")
c.SSHUser = c.SSHUser[0:32]
}
glog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile)
// public keyfile is written last, so check for that.
publicKeyFile := c.SSHKeyfile + ".pub"
exists, err := utilfile.FileExists(publicKeyFile)
if err != nil {
glog.Errorf("Error detecting if key exists: %v", err)
} else if !exists {
glog.Infof("Key doesn't exist, attempting to create")
if err := generateSSHKey(c.SSHKeyfile, publicKeyFile); err != nil {
glog.Errorf("Failed to create key pair: %v", err)
}
}
c.tunnels = ssh.NewSSHTunnelList(c.SSHUser, c.SSHKeyfile, c.HealthCheckURL, c.stopChan)
// Sync loop to ensure that the SSH key has been installed.
c.lastSSHKeySync = c.clock.Now().Unix()
c.installSSHKeySyncLoop(c.SSHUser, publicKeyFile)
// Sync tunnelList w/ nodes.
c.lastSync = c.clock.Now().Unix()
c.nodesSyncLoop()
}
// Stop gracefully shuts down the tunneler
func (c *SSHTunneler) Stop() {
if c.stopChan != nil {
close(c.stopChan)
c.stopChan = nil
}
}
func (c *SSHTunneler) Dial(ctx context.Context, net, addr string) (net.Conn, error) {
return c.tunnels.Dial(ctx, net, addr)
}
func (c *SSHTunneler) SecondsSinceSync() int64 {
now := c.clock.Now().Unix()
then := atomic.LoadInt64(&c.lastSync)
return now - then
}
func (c *SSHTunneler) SecondsSinceSSHKeySync() int64 {
now := c.clock.Now().Unix()
then := atomic.LoadInt64(&c.lastSSHKeySync)
return now - then
}
func (c *SSHTunneler) installSSHKeySyncLoop(user, publicKeyfile string) {
go wait.Until(func() {
if c.InstallSSHKey == nil {
glog.Error("Won't attempt to install ssh key: InstallSSHKey function is nil")
return
}
key, err := ssh.ParsePublicKeyFromFile(publicKeyfile)
if err != nil {
glog.Errorf("Failed to load public key: %v", err)
return
}
keyData, err := ssh.EncodeSSHKey(key)
if err != nil {
glog.Errorf("Failed to encode public key: %v", err)
return
}
if err := c.InstallSSHKey(context.TODO(), user, keyData); err != nil {
glog.Errorf("Failed to install ssh key: %v", err)
return
}
atomic.StoreInt64(&c.lastSSHKeySync, c.clock.Now().Unix())
}, 5*time.Minute, c.stopChan)
}
// nodesSyncLoop lists nodes every 15 seconds, calling Update() on the TunnelList
// each time (Update() is a noop if no changes are necessary).
func (c *SSHTunneler) nodesSyncLoop() {
// TODO (cjcullen) make this watch.
go wait.Until(func() {
addrs, err := c.getAddresses()
glog.V(4).Infof("Calling update w/ addrs: %v", addrs)
if err != nil {
glog.Errorf("Failed to getAddresses: %v", err)
}
c.tunnels.Update(addrs)
atomic.StoreInt64(&c.lastSync, c.clock.Now().Unix())
}, 15*time.Second, c.stopChan)
}
func generateSSHKey(privateKeyfile, publicKeyfile string) error {
private, public, err := ssh.GenerateKey(2048)
if err != nil {
return err
}
// If private keyfile already exists, we must have only made it halfway
// through last time, so delete it.
exists, err := utilfile.FileExists(privateKeyfile)
if err != nil {
glog.Errorf("Error detecting if private key exists: %v", err)
} else if exists {
glog.Infof("Private key exists, but public key does not")
if err := os.Remove(privateKeyfile); err != nil {
glog.Errorf("Failed to remove stale private key: %v", err)
}
}
if err := ioutil.WriteFile(privateKeyfile, ssh.EncodePrivateKey(private), 0600); err != nil {
return err
}
publicKeyBytes, err := ssh.EncodePublicKey(public)
if err != nil {
return err
}
if err := ioutil.WriteFile(publicKeyfile+".tmp", publicKeyBytes, 0600); err != nil {
return err
}
return os.Rename(publicKeyfile+".tmp", publicKeyfile)
}
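// exampleGenerateAndLoadKey is an illustrative sketch, not part of the original
// file: it generates a key pair under a hypothetical path and parses the
// public half back the same way installSSHKeySyncLoop does.
func exampleGenerateAndLoadKey() {
	private := "/tmp/tunneler-example-id"
	public := private + ".pub"
	if err := generateSSHKey(private, public); err != nil {
		glog.Errorf("failed to generate key pair: %v", err)
		return
	}
	if _, err := ssh.ParsePublicKeyFromFile(public); err != nil {
		glog.Errorf("failed to parse generated public key: %v", err)
	}
}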

View File

@ -1,163 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tunneler
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"github.com/stretchr/testify/assert"
)
// TestSecondsSinceSync verifies that proper results are returned
// when checking the time between syncs
func TestSecondsSinceSync(t *testing.T) {
tests := []struct {
name string
lastSync int64
clock *clock.FakeClock
want int64
}{
{
name: "Nano Second. No difference",
lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)),
want: int64(0),
},
{
name: "Second",
lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)),
want: int64(1),
},
{
name: "Minute",
lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)),
want: int64(60),
},
{
name: "Hour",
lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)),
want: int64(3600),
},
{
name: "Day",
lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)),
want: int64(86400),
},
{
name: "Month",
lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)),
want: int64(2678400),
},
{
name: "Future Month. Should be -Month",
lastSync: time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix(),
clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)),
want: int64(-2678400),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tunneler := &SSHTunneler{}
assert := assert.New(t)
tunneler.lastSync = tt.lastSync
tunneler.clock = tt.clock
assert.Equal(int64(tt.want), tunneler.SecondsSinceSync())
})
}
}
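// exampleFakeClockLag is an illustrative sketch, not part of the original file:
// it expresses the same idea as the table above, but advances the fake clock
// with Step instead of constructing absolute dates.
func exampleFakeClockLag(t *testing.T) {
	fake := clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC))
	tunneler := &SSHTunneler{clock: fake, lastSync: fake.Now().Unix()}
	fake.Step(90 * time.Second)
	assert.Equal(t, int64(90), tunneler.SecondsSinceSync())
}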
// generateTempFilePath builds a unique temporary file path under the OS temp dir
func generateTempFilePath(prefix string) string {
tmpPath, _ := filepath.Abs(fmt.Sprintf("%s/%s-%d", os.TempDir(), prefix, time.Now().Unix()))
return tmpPath
}
// TestGenerateSSHKey verifies that SSH key generation does indeed
// generate keys even when keys already exist.
func TestGenerateSSHKey(t *testing.T) {
assert := assert.New(t)
privateKey := generateTempFilePath("private")
publicKey := generateTempFilePath("public")
// Make sure we have no test keys laying around
os.Remove(privateKey)
os.Remove(publicKey)
// Pass case: sunny day, neither key exists yet
err := generateSSHKey(privateKey, publicKey)
assert.NoError(err, "generateSSHKey should not have retuend an error: %s", err)
// Pass case: PrivateKey exists test case
os.Remove(publicKey)
err = generateSSHKey(privateKey, publicKey)
assert.NoError(err, "generateSSHKey should not have retuend an error: %s", err)
// Pass case: PublicKey exists test case
os.Remove(privateKey)
err = generateSSHKey(privateKey, publicKey)
assert.NoError(err, "generateSSHKey should not have retuend an error: %s", err)
// Make sure we have no test keys laying around
os.Remove(privateKey)
os.Remove(publicKey)
// TODO: testing error cases where the file can not be removed?
}
type FakeTunneler struct {
SecondsSinceSyncValue int64
SecondsSinceSSHKeySyncValue int64
}
func (t *FakeTunneler) Run(AddressFunc) {}
func (t *FakeTunneler) Stop() {}
func (t *FakeTunneler) Dial(ctx context.Context, net, addr string) (net.Conn, error) { return nil, nil }
func (t *FakeTunneler) SecondsSinceSync() int64 { return t.SecondsSinceSyncValue }
func (t *FakeTunneler) SecondsSinceSSHKeySync() int64 { return t.SecondsSinceSSHKeySyncValue }
// TestIsTunnelSyncHealthy verifies that the 600 second lag test
// is honored.
func TestIsTunnelSyncHealthy(t *testing.T) {
tunneler := &FakeTunneler{}
// Pass case: 540 second lag
tunneler.SecondsSinceSyncValue = 540
healthFn := TunnelSyncHealthChecker(tunneler)
err := healthFn(nil)
assert.NoError(t, err, "IsTunnelSyncHealthy() should not have returned an error.")
// Fail case: 720 second lag
tunneler.SecondsSinceSyncValue = 720
err = healthFn(nil)
assert.Error(t, err, "IsTunnelSyncHealthy() should have returned an error.")
}
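// exampleSSHKeySyncThreshold is an illustrative sketch, not part of the original
// file: it exercises the 900 second SSH key sync threshold, which the test
// above leaves uncovered, by reusing FakeTunneler.
func exampleSSHKeySyncThreshold(t *testing.T) {
	tunneler := &FakeTunneler{SecondsSinceSyncValue: 30, SecondsSinceSSHKeySyncValue: 1200}
	healthFn := TunnelSyncHealthChecker(tunneler)
	assert.Error(t, healthFn(nil), "a key sync lag above 900 seconds should be reported as unhealthy")
}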