mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

vendor files
65  vendor/k8s.io/kubernetes/test/integration/BUILD  generated vendored  Normal file
@@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "utils.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration",
    deps = [
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/integration/apiserver:all-srcs",
        "//test/integration/auth:all-srcs",
        "//test/integration/client:all-srcs",
        "//test/integration/configmap:all-srcs",
        "//test/integration/daemonset:all-srcs",
        "//test/integration/defaulttolerationseconds:all-srcs",
        "//test/integration/deployment:all-srcs",
        "//test/integration/etcd:all-srcs",
        "//test/integration/evictions:all-srcs",
        "//test/integration/examples:all-srcs",
        "//test/integration/framework:all-srcs",
        "//test/integration/garbagecollector:all-srcs",
        "//test/integration/master:all-srcs",
        "//test/integration/metrics:all-srcs",
        "//test/integration/objectmeta:all-srcs",
        "//test/integration/openshift:all-srcs",
        "//test/integration/pods:all-srcs",
        "//test/integration/quota:all-srcs",
        "//test/integration/replicaset:all-srcs",
        "//test/integration/replicationcontroller:all-srcs",
        "//test/integration/scale:all-srcs",
        "//test/integration/scheduler:all-srcs",
        "//test/integration/scheduler_perf:all-srcs",
        "//test/integration/secrets:all-srcs",
        "//test/integration/serviceaccount:all-srcs",
        "//test/integration/storageclasses:all-srcs",
        "//test/integration/ttlcontroller:all-srcs",
        "//test/integration/volume:all-srcs",
    ],
    tags = ["automanaged"],
)
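Editor's note: the package-srcs / all-srcs filegroup pair is the convention Kubernetes' automanaged BUILD tooling uses so that any parent package can aggregate the sources of its entire subtree; the all-srcs target above simply fans out to every test/integration subpackage. A hypothetical way to inspect these automanaged targets, assuming a Bazel workspace checkout:

    bazel query 'kind(filegroup, //test/integration/...)'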
54  vendor/k8s.io/kubernetes/test/integration/apiserver/BUILD  generated vendored  Normal file
@@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "apiserver_test.go",
        "main_test.go",
        "patch_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/apiserver",
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/handlers:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/features:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/pager:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
234  vendor/k8s.io/kubernetes/test/integration/apiserver/apiserver_test.go  generated vendored  Normal file
@@ -0,0 +1,234 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
	"k8s.io/kubernetes/pkg/api/testapi"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/test/integration/framework"
)

func setup(t *testing.T) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.ExtraConfig.EnableCoreControllers = false
	_, s, closeFn := framework.RunAMaster(masterConfig)

	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}
	return s, clientSet, closeFn
}

func verifyStatusCode(t *testing.T, verb, URL, body string, expectedStatusCode int) {
	// We don't use the typed Go client to send this request, so that we can verify the response status code.
	bodyBytes := bytes.NewReader([]byte(body))
	req, err := http.NewRequest(verb, URL, bodyBytes)
	if err != nil {
		t.Fatalf("unexpected error: %v in sending req with verb: %s, URL: %s and body: %s", err, verb, URL, body)
	}
	transport := http.DefaultTransport
	glog.Infof("Sending request: %v", req)
	resp, err := transport.RoundTrip(req)
	if err != nil {
		t.Fatalf("unexpected error: %v in req: %v", err, req)
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != expectedStatusCode {
		t.Errorf("Expected status %v, but got %v", expectedStatusCode, resp.StatusCode)
		t.Errorf("Body: %v", string(b))
	}
}

func path(resource, namespace, name string) string {
	return testapi.Extensions.ResourcePath(resource, namespace, name)
}

func newRS(namespace string) *v1beta1.ReplicaSet {
	return &v1beta1.ReplicaSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicaSet",
			APIVersion: "extensions/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    namespace,
			GenerateName: "apiserver-test",
		},
		Spec: v1beta1.ReplicaSetSpec{
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": "test"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "fake-name",
							Image: "fakeimage",
						},
					},
				},
			},
		},
	}
}

var cascDel = `
{
  "kind": "DeleteOptions",
  "apiVersion": "` + testapi.Groups[api.GroupName].GroupVersion().String() + `",
  "orphanDependents": false
}
`

// Tests that the apiserver returns the 202 status code as expected.
func Test202StatusCode(t *testing.T) {
	s, clientSet, closeFn := setup(t)
	defer closeFn()

	ns := framework.CreateTestingNamespace("status-code", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	rsClient := clientSet.Extensions().ReplicaSets(ns.Name)

	// 1. Create the resource without any finalizer and then delete it without setting DeleteOptions.
	// Verify that the server returns 200 in this case.
	rs, err := rsClient.Create(newRS(ns.Name))
	if err != nil {
		t.Fatalf("Failed to create rs: %v", err)
	}
	verifyStatusCode(t, "DELETE", s.URL+path("replicasets", ns.Name, rs.Name), "", 200)

	// 2. Create the resource with a finalizer so that the resource is not immediately deleted, and then delete it without setting DeleteOptions.
	// Verify that the apiserver still returns 200 since DeleteOptions.OrphanDependents is not set.
	rs = newRS(ns.Name)
	rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"}
	rs, err = rsClient.Create(rs)
	if err != nil {
		t.Fatalf("Failed to create rs: %v", err)
	}
	verifyStatusCode(t, "DELETE", s.URL+path("replicasets", ns.Name, rs.Name), "", 200)

	// 3. Create the resource and then delete it with DeleteOptions.OrphanDependents=false.
	// Verify that the server still returns 200 since the resource is immediately deleted.
	rs = newRS(ns.Name)
	rs, err = rsClient.Create(rs)
	if err != nil {
		t.Fatalf("Failed to create rs: %v", err)
	}
	verifyStatusCode(t, "DELETE", s.URL+path("replicasets", ns.Name, rs.Name), cascDel, 200)

	// 4. Create the resource with a finalizer so that the resource is not immediately deleted, and then delete it with DeleteOptions.OrphanDependents=false.
	// Verify that the server returns 202 in this case.
	rs = newRS(ns.Name)
	rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"}
	rs, err = rsClient.Create(rs)
	if err != nil {
		t.Fatalf("Failed to create rs: %v", err)
	}
	verifyStatusCode(t, "DELETE", s.URL+path("replicasets", ns.Name, rs.Name), cascDel, 202)
}

func TestAPIListChunking(t *testing.T) {
	if err := utilfeature.DefaultFeatureGate.Set(string(genericfeatures.APIListChunking) + "=true"); err != nil {
		t.Fatal(err)
	}
	s, clientSet, closeFn := setup(t)
	defer closeFn()

	ns := framework.CreateTestingNamespace("list-paging", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	rsClient := clientSet.Extensions().ReplicaSets(ns.Name)

	for i := 0; i < 4; i++ {
		rs := newRS(ns.Name)
		rs.Name = fmt.Sprintf("test-%d", i)
		if _, err := rsClient.Create(rs); err != nil {
			t.Fatal(err)
		}
	}

	calls := 0
	firstRV := ""
	p := &pager.ListPager{
		PageSize: 1,
		PageFn: pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
			calls++
			list, err := rsClient.List(opts)
			if err != nil {
				return nil, err
			}
			if calls == 1 {
				firstRV = list.ResourceVersion
			}
			if calls == 2 {
				rs := newRS(ns.Name)
				rs.Name = "test-5"
				if _, err := rsClient.Create(rs); err != nil {
					t.Fatal(err)
				}
			}
			return list, err
		}),
	}
	listObj, err := p.List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if calls != 4 {
		t.Errorf("unexpected list invocations: %d", calls)
	}
	list := listObj.(metav1.ListInterface)
	if len(list.GetContinue()) != 0 {
		t.Errorf("unexpected continue: %s", list.GetContinue())
	}
	if list.GetResourceVersion() != firstRV {
		t.Errorf("unexpected resource version: %s instead of %s", list.GetResourceVersion(), firstRV)
	}
	var names []string
	if err := meta.EachListItem(listObj, func(obj runtime.Object) error {
		rs := obj.(*v1beta1.ReplicaSet)
		names = append(names, rs.Name)
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(names, []string{"test-0", "test-1", "test-2", "test-3"}) {
		t.Errorf("unexpected items: %#v", list)
	}
}
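Editor's note: pager.ListPager above is a convenience wrapper over API list chunking; it issues LIST calls with a page-size limit and follows the server's continue token until the list is exhausted. A minimal hand-rolled sketch of the same loop, assuming the rsClient from the test:

	// Sketch: drive the continue token by hand instead of using pager.ListPager.
	opts := metav1.ListOptions{Limit: 1}
	for {
		list, err := rsClient.List(opts)
		if err != nil {
			break // real code would surface the error
		}
		// ... consume list.Items ...
		if list.Continue == "" {
			break // no more pages
		}
		opts.Continue = list.Continue
	}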
27  vendor/k8s.io/kubernetes/test/integration/apiserver/main_test.go  generated vendored  Normal file
@@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
114  vendor/k8s.io/kubernetes/test/integration/apiserver/patch_test.go  generated vendored  Normal file
@@ -0,0 +1,114 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"

	"github.com/pborman/uuid"

	"reflect"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/endpoints/handlers"
	"k8s.io/kubernetes/test/integration/framework"
)

// Tests that the apiserver retries non-overlapping conflicts on patches
func TestPatchConflicts(t *testing.T) {
	s, clientSet, closeFn := setup(t)
	defer closeFn()

	ns := framework.CreateTestingNamespace("status-code", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	// Create the object we're going to conflict on
	clientSet.CoreV1().Secrets(ns.Name).Create(&v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
			// Populate annotations so the strategic patch descends, compares, and notices the $patch directive
			Annotations: map[string]string{"initial": "value"},
		},
	})
	client := clientSet.CoreV1().RESTClient()

	successes := int32(0)

	// Run a lot of simultaneous patch operations to exercise internal API server retry of patch application.
	// Internally, a patch API call retries up to MaxRetryWhenPatchConflicts times if the resource version of the object has changed.
	// If the resource version of the object changed between attempts, that means another one of our patch requests succeeded.
	// That means if we run 2*MaxRetryWhenPatchConflicts patch attempts, we should see at least MaxRetryWhenPatchConflicts succeed.
	wg := sync.WaitGroup{}
	for i := 0; i < (2 * handlers.MaxRetryWhenPatchConflicts); i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			annotationName := fmt.Sprintf("annotation-%d", i)
			labelName := fmt.Sprintf("label-%d", i)
			value := uuid.NewRandom().String()

			obj, err := client.Patch(types.StrategicMergePatchType).
				Namespace(ns.Name).
				Resource("secrets").
				Name("test").
				Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "annotations":{"$patch":"replace","%s":"%s"}}}`, labelName, value, annotationName, value))).
				Do().
				Get()

			if errors.IsConflict(err) {
				t.Logf("tolerated conflict error patching %s: %v", "secrets", err)
				return
			}
			if err != nil {
				t.Errorf("error patching %s: %v", "secrets", err)
				return
			}

			accessor, err := meta.Accessor(obj)
			if err != nil {
				t.Errorf("error getting object from %s: %v", "secrets", err)
				return
			}
			// make sure the label we wanted was effective
			if accessor.GetLabels()[labelName] != value {
				t.Errorf("patch of %s was ineffective, expected %s=%s, got labels %#v", "secrets", labelName, value, accessor.GetLabels())
				return
			}
			// make sure the patch directive didn't get lost, and that the entire annotation map was replaced
			if !reflect.DeepEqual(accessor.GetAnnotations(), map[string]string{annotationName: value}) {
				t.Errorf("patch of %s with $patch directive was ineffective, didn't replace entire annotations map: %#v", "secrets", accessor.GetAnnotations())
			}

			atomic.AddInt32(&successes, 1)
		}(i)
	}
	wg.Wait()

	if successes < handlers.MaxRetryWhenPatchConflicts {
		t.Errorf("Expected at least %d successful patches for %s, got %d", handlers.MaxRetryWhenPatchConflicts, "secrets", successes)
	} else {
		t.Logf("Got %d successful patches for %s", successes, "secrets")
	}
}
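Editor's note: the "$patch":"replace" key in the request body above is a strategic-merge-patch directive: instead of merging the annotations maps key by key, the server replaces the stored map wholesale. A hypothetical before/after, assuming the object already carried an annotation:

	// Stored object:           annotations: {"initial":"value"}
	// Patch body:              {"metadata":{"annotations":{"$patch":"replace","annotation-7":"abc"}}}
	// Result:                  annotations: {"annotation-7":"abc"}                    (map replaced)
	// Without the directive:   annotations: {"initial":"value","annotation-7":"abc"}  (maps merged)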
92  vendor/k8s.io/kubernetes/test/integration/auth/BUILD  generated vendored  Normal file
@@ -0,0 +1,92 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "accessreview_test.go",
        "auth_test.go",
        "bootstraptoken_test.go",
        "main_test.go",
        "node_test.go",
        "rbac_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/auth",
    tags = ["integration"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/authorization:go_default_library",
        "//pkg/apis/autoscaling:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/apis/policy:go_default_library",
        "//pkg/apis/rbac:go_default_library",
        "//pkg/auth/authorizer/abac:go_default_library",
        "//pkg/auth/nodeidentifier:go_default_library",
        "//pkg/bootstrap/api:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/authorizer:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/registry/rbac/clusterrole:go_default_library",
        "//pkg/registry/rbac/clusterrole/storage:go_default_library",
        "//pkg/registry/rbac/clusterrolebinding:go_default_library",
        "//pkg/registry/rbac/clusterrolebinding/storage:go_default_library",
        "//pkg/registry/rbac/role:go_default_library",
        "//pkg/registry/rbac/role/storage:go_default_library",
        "//pkg/registry/rbac/rolebinding:go_default_library",
        "//pkg/registry/rbac/rolebinding/storage:go_default_library",
        "//plugin/pkg/admission/admit:go_default_library",
        "//plugin/pkg/admission/noderestriction:go_default_library",
        "//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
        "//plugin/pkg/auth/authorizer/rbac:go_default_library",
        "//test/e2e/lifecycle/bootstrap:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library",
        "//vendor/k8s.io/client-go/transport:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
347  vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go  generated vendored  Normal file
@@ -0,0 +1,347 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"errors"
	"net/http"
	"strings"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
	api "k8s.io/kubernetes/pkg/apis/core"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/plugin/pkg/admission/admit"
	"k8s.io/kubernetes/test/integration/framework"
)

// Inject into master an authorizer that uses user info.
// TODO(etune): remove this test once a more comprehensive built-in authorizer is implemented.
type sarAuthorizer struct{}

func (sarAuthorizer) Authorize(a authorizer.Attributes) (authorizer.Decision, string, error) {
	if a.GetUser().GetName() == "dave" {
		return authorizer.DecisionNoOpinion, "no", errors.New("I'm sorry, Dave")
	}

	return authorizer.DecisionAllow, "you're not dave", nil
}

func alwaysAlice(req *http.Request) (user.Info, bool, error) {
	return &user.DefaultInfo{
		Name: "alice",
	}, true, nil
}

func TestSubjectAccessReview(t *testing.T) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
	masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
	masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
	_, s, closeFn := framework.RunAMaster(masterConfig)
	defer closeFn()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})

	tests := []struct {
		name           string
		sar            *authorizationapi.SubjectAccessReview
		expectedError  string
		expectedStatus authorizationapi.SubjectAccessReviewStatus
	}{
		{
			name: "simple allow",
			sar: &authorizationapi.SubjectAccessReview{
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:     "list",
						Group:    api.GroupName,
						Version:  "v1",
						Resource: "pods",
					},
					User: "alice",
				},
			},
			expectedStatus: authorizationapi.SubjectAccessReviewStatus{
				Allowed: true,
				Reason:  "you're not dave",
			},
		},
		{
			name: "simple deny",
			sar: &authorizationapi.SubjectAccessReview{
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:     "list",
						Group:    api.GroupName,
						Version:  "v1",
						Resource: "pods",
					},
					User: "dave",
				},
			},
			expectedStatus: authorizationapi.SubjectAccessReviewStatus{
				Allowed:         false,
				Reason:          "no",
				EvaluationError: "I'm sorry, Dave",
			},
		},
		{
			name: "simple error",
			sar: &authorizationapi.SubjectAccessReview{
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:     "list",
						Group:    api.GroupName,
						Version:  "v1",
						Resource: "pods",
					},
				},
			},
			expectedError: "at least one of user or group must be specified",
		},
	}

	for _, test := range tests {
		response, err := clientset.Authorization().SubjectAccessReviews().Create(test.sar)
		switch {
		case err == nil && len(test.expectedError) == 0:

		case err != nil && strings.Contains(err.Error(), test.expectedError):
			continue

		case err != nil && len(test.expectedError) != 0:
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		default:
			t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
			continue
		}
		if response.Status != test.expectedStatus {
			t.Errorf("%s: expected %v, got %v", test.name, test.expectedStatus, response.Status)
			continue
		}
	}
}

func TestSelfSubjectAccessReview(t *testing.T) {
	username := "alice"
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
		return &user.DefaultInfo{Name: username}, true, nil
	})
	masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
	masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
	_, s, closeFn := framework.RunAMaster(masterConfig)
	defer closeFn()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})

	tests := []struct {
		name           string
		username       string
		sar            *authorizationapi.SelfSubjectAccessReview
		expectedError  string
		expectedStatus authorizationapi.SubjectAccessReviewStatus
	}{
		{
			name:     "simple allow",
			username: "alice",
			sar: &authorizationapi.SelfSubjectAccessReview{
				Spec: authorizationapi.SelfSubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:     "list",
						Group:    api.GroupName,
						Version:  "v1",
						Resource: "pods",
					},
				},
			},
			expectedStatus: authorizationapi.SubjectAccessReviewStatus{
				Allowed: true,
				Reason:  "you're not dave",
			},
		},
		{
			name:     "simple deny",
			username: "dave",
			sar: &authorizationapi.SelfSubjectAccessReview{
				Spec: authorizationapi.SelfSubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:     "list",
						Group:    api.GroupName,
						Version:  "v1",
						Resource: "pods",
					},
				},
			},
			expectedStatus: authorizationapi.SubjectAccessReviewStatus{
				Allowed:         false,
				Reason:          "no",
				EvaluationError: "I'm sorry, Dave",
			},
		},
	}

	for _, test := range tests {
		username = test.username

		response, err := clientset.Authorization().SelfSubjectAccessReviews().Create(test.sar)
		switch {
		case err == nil && len(test.expectedError) == 0:

		case err != nil && strings.Contains(err.Error(), test.expectedError):
			continue

		case err != nil && len(test.expectedError) != 0:
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		default:
			t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
			continue
		}
		if response.Status != test.expectedStatus {
			t.Errorf("%s: expected %v, got %v", test.name, test.expectedStatus, response.Status)
			continue
		}
	}
}

func TestLocalSubjectAccessReview(t *testing.T) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
	masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
	masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
	_, s, closeFn := framework.RunAMaster(masterConfig)
	defer closeFn()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})

	tests := []struct {
		name           string
		namespace      string
		sar            *authorizationapi.LocalSubjectAccessReview
		expectedError  string
		expectedStatus authorizationapi.SubjectAccessReviewStatus
	}{
		{
			name:      "simple allow",
			namespace: "foo",
			sar: &authorizationapi.LocalSubjectAccessReview{
				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:      "list",
						Group:     api.GroupName,
						Version:   "v1",
						Resource:  "pods",
						Namespace: "foo",
					},
					User: "alice",
				},
			},
			expectedStatus: authorizationapi.SubjectAccessReviewStatus{
				Allowed: true,
				Reason:  "you're not dave",
			},
		},
		{
			name:      "simple deny",
			namespace: "foo",
			sar: &authorizationapi.LocalSubjectAccessReview{
				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:      "list",
						Group:     api.GroupName,
						Version:   "v1",
						Resource:  "pods",
						Namespace: "foo",
					},
					User: "dave",
				},
			},
			expectedStatus: authorizationapi.SubjectAccessReviewStatus{
				Allowed:         false,
				Reason:          "no",
				EvaluationError: "I'm sorry, Dave",
			},
		},
		{
			name:      "conflicting namespace",
			namespace: "foo",
			sar: &authorizationapi.LocalSubjectAccessReview{
				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:      "list",
						Group:     api.GroupName,
						Version:   "v1",
						Resource:  "pods",
						Namespace: "bar",
					},
					User: "dave",
				},
			},
			expectedError: "must match metadata.namespace",
		},
		{
			name:      "missing namespace",
			namespace: "foo",
			sar: &authorizationapi.LocalSubjectAccessReview{
				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
				Spec: authorizationapi.SubjectAccessReviewSpec{
					ResourceAttributes: &authorizationapi.ResourceAttributes{
						Verb:     "list",
						Group:    api.GroupName,
						Version:  "v1",
						Resource: "pods",
					},
					User: "dave",
				},
			},
			expectedError: "must match metadata.namespace",
		},
	}

	for _, test := range tests {
		response, err := clientset.Authorization().LocalSubjectAccessReviews(test.namespace).Create(test.sar)
		switch {
		case err == nil && len(test.expectedError) == 0:

		case err != nil && strings.Contains(err.Error(), test.expectedError):
			continue

		case err != nil && len(test.expectedError) != 0:
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		default:
			t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
			continue
		}
		if response.Status != test.expectedStatus {
			t.Errorf("%s: expected %#v, got %#v", test.name, test.expectedStatus, response.Status)
			continue
		}
	}
}
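Editor's note: outside of this synthetic test setup, the same kind of check can be issued against any cluster with the typed client; a minimal sketch using the same internal clientset API as the tests above (variable names are illustrative):

	sar := &authorizationapi.SubjectAccessReview{
		Spec: authorizationapi.SubjectAccessReviewSpec{
			User: "alice",
			ResourceAttributes: &authorizationapi.ResourceAttributes{
				Verb:     "list",
				Version:  "v1",
				Resource: "pods",
			},
		},
	}
	// Asks the apiserver: "may user alice list pods?"
	resp, err := clientset.Authorization().SubjectAccessReviews().Create(sar)
	if err == nil && resp.Status.Allowed {
		// permitted
	}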
1286  vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go  generated vendored  Normal file
File diff suppressed because it is too large
188  vendor/k8s.io/kubernetes/test/integration/auth/bootstraptoken_test.go  generated vendored  Normal file
@@ -0,0 +1,188 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
	api "k8s.io/kubernetes/pkg/apis/core"
	bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
	"k8s.io/kubernetes/plugin/pkg/admission/admit"
	"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
	bootstraputil "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
	"k8s.io/kubernetes/test/integration"
	"k8s.io/kubernetes/test/integration/framework"
)

type bootstrapSecrets []*api.Secret

func (b bootstrapSecrets) List(selector labels.Selector) (ret []*api.Secret, err error) {
	return b, nil
}

func (b bootstrapSecrets) Get(name string) (*api.Secret, error) {
	return b[0], nil
}

// TestBootstrapTokenAuth tests the bootstrap token auth provider
func TestBootstrapTokenAuth(t *testing.T) {
	tokenId, err := bootstraputil.GenerateTokenId()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	secret, err := bootstraputil.GenerateTokenSecret()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	var bootstrapSecretValid = &api.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceSystem,
			Name:      bootstrapapi.BootstrapTokenSecretPrefix,
		},
		Type: api.SecretTypeBootstrapToken,
		Data: map[string][]byte{
			bootstrapapi.BootstrapTokenIDKey:               []byte(tokenId),
			bootstrapapi.BootstrapTokenSecretKey:           []byte(secret),
			bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
		},
	}
	var bootstrapSecretInvalid = &api.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceSystem,
			Name:      bootstrapapi.BootstrapTokenSecretPrefix,
		},
		Type: api.SecretTypeBootstrapToken,
		Data: map[string][]byte{
			bootstrapapi.BootstrapTokenIDKey:               []byte(tokenId),
			bootstrapapi.BootstrapTokenSecretKey:           []byte("invalid"),
			bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
		},
	}
	var expiredBootstrapToken = &api.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceSystem,
			Name:      bootstrapapi.BootstrapTokenSecretPrefix,
		},
		Type: api.SecretTypeBootstrapToken,
		Data: map[string][]byte{
			bootstrapapi.BootstrapTokenIDKey:               []byte(tokenId),
			bootstrapapi.BootstrapTokenSecretKey:           []byte("invalid"),
			bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
			bootstrapapi.BootstrapTokenExpirationKey:       []byte(bootstraputil.TimeStringFromNow(-time.Hour)),
		},
	}
	type request struct {
		verb        string
		URL         string
		body        string
		statusCodes map[int]bool // Set of expected resp.StatusCode if all goes well.
	}
	tests := []struct {
		name    string
		request request
		secret  *api.Secret
	}{
		{
			name:    "valid token",
			request: request{verb: "GET", URL: path("pods", "", ""), body: "", statusCodes: integration.Code200},
			secret:  bootstrapSecretValid,
		},
		{
			name:    "invalid token format",
			request: request{verb: "GET", URL: path("pods", "", ""), body: "", statusCodes: integration.Code401},
			secret:  bootstrapSecretInvalid,
		},
		{
			name:    "invalid token expired",
			request: request{verb: "GET", URL: path("pods", "", ""), body: "", statusCodes: integration.Code401},
			secret:  expiredBootstrapToken,
		},
	}
	for _, test := range tests {

		authenticator := bearertoken.New(bootstrap.NewTokenAuthenticator(bootstrapSecrets{test.secret}))
		// Set up a master
		masterConfig := framework.NewIntegrationTestMasterConfig()
		masterConfig.GenericConfig.Authenticator = authenticator
		masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
		_, s, closeFn := framework.RunAMaster(masterConfig)
		defer closeFn()

		ns := framework.CreateTestingNamespace("auth-bootstrap-token", s, t)
		defer framework.DeleteTestingNamespace(ns, s, t)

		previousResourceVersion := make(map[string]float64)
		transport := http.DefaultTransport

		token := tokenId + "." + secret
		var bodyStr string
		if test.request.body != "" {
			sub := ""
			if test.request.verb == "PUT" {
				// For update operations, insert the previous resource version
				if resVersion := previousResourceVersion[getPreviousResourceVersionKey(test.request.URL, "")]; resVersion != 0 {
					sub += fmt.Sprintf(",\r\n\"resourceVersion\": \"%v\"", resVersion)
				}
				sub += fmt.Sprintf(",\r\n\"namespace\": %q", ns.Name)
			}
			bodyStr = fmt.Sprintf(test.request.body, sub)
		}
		test.request.body = bodyStr
		bodyBytes := bytes.NewReader([]byte(bodyStr))
		req, err := http.NewRequest(test.request.verb, s.URL+test.request.URL, bodyBytes)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
		if test.request.verb == "PATCH" {
			req.Header.Set("Content-Type", "application/merge-patch+json")
		}

		func() {
			resp, err := transport.RoundTrip(req)
			// Check the error before touching the response; deferring
			// resp.Body.Close() first would panic on a nil response.
			if err != nil {
				t.Logf("case %v", test.name)
				t.Fatalf("unexpected error: %v", err)
			}
			defer resp.Body.Close()
			b, _ := ioutil.ReadAll(resp.Body)
			if _, ok := test.request.statusCodes[resp.StatusCode]; !ok {
				t.Logf("case %v", test.name)
				t.Errorf("Expected status one of %v, but got %v", test.request.statusCodes, resp.StatusCode)
				t.Errorf("Body: %v", string(b))
			} else {
				if test.request.verb == "POST" {
					// For successful create operations, extract the resourceVersion
					id, currentResourceVersion, err := parseResourceVersion(b)
					if err == nil {
						key := getPreviousResourceVersionKey(test.request.URL, id)
						previousResourceVersion[key] = currentResourceVersion
					}
				}
			}
		}()
	}
}
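Editor's note: a bootstrap token presented to the apiserver is the concatenation "<token-id>.<token-secret>", which is exactly how the test assembles its Authorization header; a sketch with illustrative values:

	// e.g. tokenId = "abcdef" (6 chars), secret = "0123456789abcdef" (16 chars)
	token := tokenId + "." + secret
	req.Header.Set("Authorization", "Bearer "+token)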
27  vendor/k8s.io/kubernetes/test/integration/auth/main_test.go  generated vendored  Normal file
@@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
408  vendor/k8s.io/kubernetes/test/integration/auth/node_test.go  generated vendored  Normal file
@@ -0,0 +1,408 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
|
||||
"k8s.io/apiserver/pkg/authentication/token/tokenfile"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/policy"
|
||||
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
|
||||
"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestNodeAuthorizer(t *testing.T) {
|
||||
// Start the server so we know the address
|
||||
h := &framework.MasterHolder{Initialized: make(chan struct{})}
|
||||
apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
<-h.Initialized
|
||||
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
|
||||
}))
|
||||
|
||||
const (
|
||||
// Define credentials
|
||||
tokenMaster = "master-token"
|
||||
tokenNodeUnknown = "unknown-token"
|
||||
tokenNode1 = "node1-token"
|
||||
tokenNode2 = "node2-token"
|
||||
)
|
||||
|
||||
authenticator := bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
|
||||
tokenMaster: {Name: "admin", Groups: []string{"system:masters"}},
|
||||
tokenNodeUnknown: {Name: "unknown", Groups: []string{"system:nodes"}},
|
||||
tokenNode1: {Name: "system:node:node1", Groups: []string{"system:nodes"}},
|
||||
tokenNode2: {Name: "system:node:node2", Groups: []string{"system:nodes"}},
|
||||
}))
|
||||
|
||||
// Build client config, clientset, and informers
|
||||
clientConfig := &restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
|
||||
superuserClient := clientsetForToken(tokenMaster, clientConfig)
|
||||
informerFactory := informers.NewSharedInformerFactory(superuserClient, time.Minute)
|
||||
|
||||
// Set up Node+RBAC authorizer
|
||||
authorizerConfig := &authorizer.AuthorizationConfig{
|
||||
AuthorizationModes: []string{"Node", "RBAC"},
|
||||
InformerFactory: informerFactory,
|
||||
}
|
||||
nodeRBACAuthorizer, _, err := authorizerConfig.New()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Set up NodeRestriction admission
|
||||
nodeRestrictionAdmission := noderestriction.NewPlugin(nodeidentifier.NewDefaultNodeIdentifier())
|
||||
nodeRestrictionAdmission.SetInternalKubeClientSet(superuserClient)
|
||||
if err := nodeRestrictionAdmission.ValidateInitialization(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Start the server
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.Authenticator = authenticator
|
||||
|
||||
masterConfig.GenericConfig.Authorizer = nodeRBACAuthorizer
|
||||
masterConfig.GenericConfig.AdmissionControl = nodeRestrictionAdmission
|
||||
_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, apiServer, h)
|
||||
defer closeFn()
|
||||
|
||||
// Start the informers
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
informerFactory.Start(stopCh)
|
||||
|
||||
// Wait for a healthy server
|
||||
for {
|
||||
result := superuserClient.Core().RESTClient().Get().AbsPath("/healthz").Do()
|
||||
_, err := result.Raw()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
t.Log(err)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
// Create objects
|
||||
if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := superuserClient.Core().PersistentVolumeClaims("ns").Create(&api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "mypvc"},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
|
||||
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceStorage: resource.MustParse("1")}},
|
||||
},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := superuserClient.Core().PersistentVolumes().Create(&api.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "mypv"},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
|
||||
Capacity: api.ResourceList{api.ResourceStorage: resource.MustParse("1")},
|
||||
ClaimRef: &api.ObjectReference{Namespace: "ns", Name: "mypvc"},
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{AzureFile: &api.AzureFilePersistentVolumeSource{ShareName: "default", SecretName: "mypvsecret"}},
|
||||
},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
getSecret := func(client clientset.Interface) error {
|
||||
_, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
|
||||
return err
|
||||
}
|
||||
getPVSecret := func(client clientset.Interface) error {
|
||||
_, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{})
|
||||
return err
|
||||
}
|
||||
getConfigMap := func(client clientset.Interface) error {
|
||||
_, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{})
|
||||
return err
|
||||
}
|
||||
getPVC := func(client clientset.Interface) error {
|
||||
_, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
|
||||
return err
|
||||
}
|
||||
getPV := func(client clientset.Interface) error {
|
||||
_, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
createNode2NormalPod := func(client clientset.Interface) error {
|
||||
_, err := client.Core().Pods("ns").Create(&api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: "node2",
|
||||
Containers: []api.Container{{Name: "image", Image: "busybox"}},
|
||||
Volumes: []api.Volume{
|
||||
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}},
|
||||
{Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}},
|
||||
{Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}},
|
||||
},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
updateNode2NormalPodStatus := func(client clientset.Interface) error {
|
||||
startTime := metav1.NewTime(time.Now())
|
||||
_, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
|
||||
Status: api.PodStatus{StartTime: &startTime},
|
||||
})
|
||||
return err
|
||||
}
|
||||
deleteNode2NormalPod := func(client clientset.Interface) error {
|
||||
zero := int64(0)
|
||||
return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
|
||||
}
|
||||
|
||||
createNode2MirrorPod := func(client clientset.Interface) error {
|
||||
_, err := client.Core().Pods("ns").Create(&api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node2mirrorpod",
|
||||
Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: "node2",
|
||||
Containers: []api.Container{{Name: "image", Image: "busybox"}},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
deleteNode2MirrorPod := func(client clientset.Interface) error {
|
||||
zero := int64(0)
|
||||
return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
|
||||
}
|
||||
|
||||
createNode2 := func(client clientset.Interface) error {
|
||||
_, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}})
|
||||
return err
|
||||
}
|
||||
updateNode2Status := func(client clientset.Interface) error {
|
||||
_, err := client.Core().Nodes().UpdateStatus(&api.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
|
||||
Status: api.NodeStatus{},
|
||||
})
|
||||
return err
|
||||
}
|
||||
deleteNode2 := func(client clientset.Interface) error {
|
||||
return client.Core().Nodes().Delete("node2", nil)
|
||||
}
|
||||
createNode2NormalPodEviction := func(client clientset.Interface) error {
|
||||
return client.Policy().Evictions("ns").Evict(&policy.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "policy/v1beta1",
|
||||
Kind: "Eviction",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node2normalpod",
|
||||
Namespace: "ns",
|
||||
},
|
||||
})
|
||||
}
|
||||
createNode2MirrorPodEviction := func(client clientset.Interface) error {
|
||||
return client.Policy().Evictions("ns").Evict(&policy.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "policy/v1beta1",
|
||||
Kind: "Eviction",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node2mirrorpod",
|
||||
Namespace: "ns",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
capacity := 50
|
||||
updatePVCCapacity := func(client clientset.Interface) error {
|
||||
capacity++
|
||||
statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity)
|
||||
patchBytes := []byte(statusString)
|
||||
_, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
|
||||
return err
|
||||
}
|
||||
|
||||
updatePVCPhase := func(client clientset.Interface) error {
|
||||
patchBytes := []byte(`{"status":{"phase": "Bound"}}`)
|
||||
_, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
|
||||
return err
|
||||
}

    nodeanonClient := clientsetForToken(tokenNodeUnknown, clientConfig)
    node1Client := clientsetForToken(tokenNode1, clientConfig)
    node2Client := clientsetForToken(tokenNode2, clientConfig)

    // all node requests from node1 and the unknown node fail
    expectForbidden(t, getSecret(nodeanonClient))
    expectForbidden(t, getPVSecret(nodeanonClient))
    expectForbidden(t, getConfigMap(nodeanonClient))
    expectForbidden(t, getPVC(nodeanonClient))
    expectForbidden(t, getPV(nodeanonClient))
    expectForbidden(t, createNode2NormalPod(nodeanonClient))
    expectForbidden(t, createNode2MirrorPod(nodeanonClient))
    expectForbidden(t, deleteNode2NormalPod(nodeanonClient))
    expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
    expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient))
    expectForbidden(t, createNode2(nodeanonClient))
    expectForbidden(t, updateNode2Status(nodeanonClient))
    expectForbidden(t, deleteNode2(nodeanonClient))

    expectForbidden(t, getSecret(node1Client))
    expectForbidden(t, getPVSecret(node1Client))
    expectForbidden(t, getConfigMap(node1Client))
    expectForbidden(t, getPVC(node1Client))
    expectForbidden(t, getPV(node1Client))
    expectForbidden(t, createNode2NormalPod(nodeanonClient))
    expectForbidden(t, createNode2MirrorPod(node1Client))
    expectNotFound(t, deleteNode2MirrorPod(node1Client))
    expectNotFound(t, createNode2MirrorPodEviction(node1Client))
    expectForbidden(t, createNode2(node1Client))
    expectForbidden(t, updateNode2Status(node1Client))
    expectForbidden(t, deleteNode2(node1Client))

    // related object requests from node2 fail
    expectForbidden(t, getSecret(node2Client))
    expectForbidden(t, getPVSecret(node2Client))
    expectForbidden(t, getConfigMap(node2Client))
    expectForbidden(t, getPVC(node2Client))
    expectForbidden(t, getPV(node2Client))

    expectForbidden(t, createNode2NormalPod(nodeanonClient))
    // mirror pod and self node lifecycle is allowed
    expectAllowed(t, createNode2MirrorPod(node2Client))
    expectAllowed(t, deleteNode2MirrorPod(node2Client))
    expectAllowed(t, createNode2MirrorPod(node2Client))
    expectAllowed(t, createNode2MirrorPodEviction(node2Client))
    expectAllowed(t, createNode2(node2Client))
    expectAllowed(t, updateNode2Status(node2Client))
    expectAllowed(t, deleteNode2(node2Client))

    // create a pod as an admin to add object references
    expectAllowed(t, createNode2NormalPod(superuserClient))

    // unidentifiable node and node1 are still forbidden
    expectForbidden(t, getSecret(nodeanonClient))
    expectForbidden(t, getPVSecret(nodeanonClient))
    expectForbidden(t, getConfigMap(nodeanonClient))
    expectForbidden(t, getPVC(nodeanonClient))
    expectForbidden(t, getPV(nodeanonClient))
    expectForbidden(t, createNode2NormalPod(nodeanonClient))
    expectForbidden(t, updateNode2NormalPodStatus(nodeanonClient))
    expectForbidden(t, deleteNode2NormalPod(nodeanonClient))
    expectForbidden(t, createNode2NormalPodEviction(nodeanonClient))
    expectForbidden(t, createNode2MirrorPod(nodeanonClient))
    expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
    expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient))

    expectForbidden(t, getSecret(node1Client))
    expectForbidden(t, getPVSecret(node1Client))
    expectForbidden(t, getConfigMap(node1Client))
    expectForbidden(t, getPVC(node1Client))
    expectForbidden(t, getPV(node1Client))
    expectForbidden(t, createNode2NormalPod(node1Client))
    expectForbidden(t, updateNode2NormalPodStatus(node1Client))
    expectForbidden(t, deleteNode2NormalPod(node1Client))
    expectForbidden(t, createNode2NormalPodEviction(node1Client))
    expectForbidden(t, createNode2MirrorPod(node1Client))
    expectNotFound(t, deleteNode2MirrorPod(node1Client))
    expectNotFound(t, createNode2MirrorPodEviction(node1Client))

    // node2 can get referenced objects now
    expectAllowed(t, getSecret(node2Client))
    expectAllowed(t, getPVSecret(node2Client))
    expectAllowed(t, getConfigMap(node2Client))
    expectAllowed(t, getPVC(node2Client))
    expectAllowed(t, getPV(node2Client))

    expectForbidden(t, createNode2NormalPod(node2Client))
    expectAllowed(t, updateNode2NormalPodStatus(node2Client))
    expectAllowed(t, deleteNode2NormalPod(node2Client))
    expectAllowed(t, createNode2MirrorPod(node2Client))
    expectAllowed(t, deleteNode2MirrorPod(node2Client))
    // recreate as an admin to test eviction
    expectAllowed(t, createNode2NormalPod(superuserClient))
    expectAllowed(t, createNode2MirrorPod(superuserClient))
    expectAllowed(t, createNode2NormalPodEviction(node2Client))
    expectAllowed(t, createNode2MirrorPodEviction(node2Client))

    // re-create a pod as an admin to add object references
    expectAllowed(t, createNode2NormalPod(superuserClient))
    // With the ExpandPersistentVolumes feature disabled
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandPersistentVolumes, false)()
    // node->pvc relationship not established
    expectForbidden(t, updatePVCCapacity(node1Client))
    // node->pvc relationship established, but the feature is disabled
    expectForbidden(t, updatePVCCapacity(node2Client))

    // Enable the ExpandPersistentVolumes feature
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandPersistentVolumes, true)()
    // node->pvc relationship not established
    expectForbidden(t, updatePVCCapacity(node1Client))
    // node->pvc relationship established and the feature is enabled
    expectAllowed(t, updatePVCCapacity(node2Client))
    // node->pvc relationship established, but updating the phase is not allowed
    expectForbidden(t, updatePVCPhase(node2Client))
}
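
// Editor's note (not part of the original file): the deferred
// SetFeatureGateDuringTest calls in the test above flip the
// ExpandPersistentVolumes gate off and then back on within a single test run;
// each call returns a closure that restores the previous gate value. The
// general shape, with features.SomeFeature standing in as a hypothetical gate:
//
//	defer utilfeaturetesting.SetFeatureGateDuringTest(
//		t, utilfeature.DefaultFeatureGate, features.SomeFeature, true)()
//	// ... assertions that depend on SomeFeature being enabled ...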

func expectForbidden(t *testing.T, err error) {
    if !errors.IsForbidden(err) {
        _, file, line, _ := runtime.Caller(1)
        t.Errorf("%s:%d: Expected forbidden error, got %v", filepath.Base(file), line, err)
    }
}

func expectNotFound(t *testing.T, err error) {
    if !errors.IsNotFound(err) {
        _, file, line, _ := runtime.Caller(1)
        t.Errorf("%s:%d: Expected notfound error, got %v", filepath.Base(file), line, err)
    }
}

func expectAllowed(t *testing.T, err error) {
    if err != nil {
        _, file, line, _ := runtime.Caller(1)
        t.Errorf("%s:%d: Expected no error, got %v", filepath.Base(file), line, err)
    }
}
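
// Editor's note (not part of the original file): runtime.Caller(1) lets these
// helpers report the caller's file and line instead of their own, which keeps
// the long expectForbidden/expectAllowed sequences above debuggable. A hedged
// sketch of one more helper in the same style (expectConflict is hypothetical,
// not used by this file):
//
//	func expectConflict(t *testing.T, err error) {
//		if !errors.IsConflict(err) {
//			_, file, line, _ := runtime.Caller(1)
//			t.Errorf("%s:%d: Expected conflict error, got %v", filepath.Base(file), line, err)
//		}
//	}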
562
vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go
generated
vendored
Normal file
@ -0,0 +1,562 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/http/httputil"
    "strings"
    "testing"
    "time"

    "github.com/golang/glog"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    "k8s.io/apiserver/pkg/authentication/token/tokenfile"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/apiserver/pkg/registry/generic"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/transport"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/api/testapi"
    api "k8s.io/kubernetes/pkg/apis/core"
    rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/master"
    "k8s.io/kubernetes/pkg/registry/rbac/clusterrole"
    clusterrolestore "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage"
    "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding"
    clusterrolebindingstore "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage"
    "k8s.io/kubernetes/pkg/registry/rbac/role"
    rolestore "k8s.io/kubernetes/pkg/registry/rbac/role/storage"
    "k8s.io/kubernetes/pkg/registry/rbac/rolebinding"
    rolebindingstore "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage"
    "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
    "k8s.io/kubernetes/test/integration/framework"
)

func clientForToken(user string) *http.Client {
    return &http.Client{
        Transport: transport.NewBearerAuthRoundTripper(
            user,
            transport.DebugWrappers(http.DefaultTransport),
        ),
    }
}

func clientsetForToken(user string, config *restclient.Config) clientset.Interface {
    configCopy := *config
    configCopy.BearerToken = user
    return clientset.NewForConfigOrDie(&configCopy)
}

type testRESTOptionsGetter struct {
    config *master.Config
}

func (getter *testRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) {
    storageConfig, err := getter.config.ExtraConfig.StorageFactory.NewConfig(resource)
    if err != nil {
        return generic.RESTOptions{}, fmt.Errorf("failed to get storage: %v", err)
    }
    return generic.RESTOptions{StorageConfig: storageConfig, Decorator: generic.UndecoratedStorage, ResourcePrefix: resource.Resource}, nil
}
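
// Editor's note (not part of the original file): generic.UndecoratedStorage
// hands the RBAC registries raw etcd-backed storage with no caching decorator,
// so the authorizer built below reads roles directly from storage. A hedged
// usage sketch, assuming a *master.Config named cfg:
//
//	optsGetter := &testRESTOptionsGetter{config: cfg}
//	opts, err := optsGetter.GetRESTOptions(
//		schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "roles"})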

func newRBACAuthorizer(config *master.Config) authorizer.Authorizer {
    optsGetter := &testRESTOptionsGetter{config}
    roleRegistry := role.AuthorizerAdapter{Registry: role.NewRegistry(rolestore.NewREST(optsGetter))}
    roleBindingRegistry := rolebinding.AuthorizerAdapter{Registry: rolebinding.NewRegistry(rolebindingstore.NewREST(optsGetter))}
    clusterRoleRegistry := clusterrole.AuthorizerAdapter{Registry: clusterrole.NewRegistry(clusterrolestore.NewREST(optsGetter))}
    clusterRoleBindingRegistry := clusterrolebinding.AuthorizerAdapter{Registry: clusterrolebinding.NewRegistry(clusterrolebindingstore.NewREST(optsGetter))}
    return rbac.New(roleRegistry, roleBindingRegistry, clusterRoleRegistry, clusterRoleBindingRegistry)
}

// bootstrapRoles are a set of RBAC roles which will be populated before the test.
type bootstrapRoles struct {
    roles               []rbacapi.Role
    roleBindings        []rbacapi.RoleBinding
    clusterRoles        []rbacapi.ClusterRole
    clusterRoleBindings []rbacapi.ClusterRoleBinding
}

// bootstrap uses the provided client to create the bootstrap roles and role bindings.
//
// client should be authenticated as the RBAC super user.
func (b bootstrapRoles) bootstrap(client clientset.Interface) error {
    for _, r := range b.clusterRoles {
        _, err := client.Rbac().ClusterRoles().Create(&r)
        if err != nil {
            return fmt.Errorf("failed to make request: %v", err)
        }
    }
    for _, r := range b.roles {
        _, err := client.Rbac().Roles(r.Namespace).Create(&r)
        if err != nil {
            return fmt.Errorf("failed to make request: %v", err)
        }
    }
    for _, r := range b.clusterRoleBindings {
        _, err := client.Rbac().ClusterRoleBindings().Create(&r)
        if err != nil {
            return fmt.Errorf("failed to make request: %v", err)
        }
    }
    for _, r := range b.roleBindings {
        _, err := client.Rbac().RoleBindings(r.Namespace).Create(&r)
        if err != nil {
            return fmt.Errorf("failed to make request: %v", err)
        }
    }

    return nil
}
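
// Editor's note (not part of the original file): bootstrap creates cluster
// roles and roles before the bindings that reference them. The ordering
// matters when the target authorizer enforces escalation checks, since a
// binding to a not-yet-existing role could otherwise be rejected.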

// request is a single test case: an HTTP request to make as the given user,
// together with the status code expected in response.
type request struct {
    // The bearer token sent as part of the request
    token string

    // Resource metadata
    verb      string
    apiGroup  string
    resource  string
    namespace string
    name      string

    // The actual resource.
    body string

    // The expected return status of this request.
    expectedStatus int
}

func (r request) String() string {
    return fmt.Sprintf("%s %s %s", r.token, r.verb, r.resource)
}

type statusCode int

func (s statusCode) String() string {
    return fmt.Sprintf("%d %s", int(s), http.StatusText(int(s)))
}

// Declare a set of raw objects to use.
var (
    // Make a role binding with the version enabled in testapi.Rbac
    // This assumes testapi is using rbac.authorization.k8s.io/v1beta1 or rbac.authorization.k8s.io/v1, which are identical in structure.
    // TODO: rework or remove testapi usage to allow writing integration tests that don't depend on envvars
    writeJobsRoleBinding = `
{
  "apiVersion": "` + testapi.Rbac.GroupVersion().String() + `",
  "kind": "RoleBinding",
  "metadata": {
    "name": "pi"%s
  },
  "roleRef": {
    "apiGroup": "rbac.authorization.k8s.io",
    "kind": "ClusterRole",
    "name": "write-jobs"
  },
  "subjects": [{
    "apiGroup": "rbac.authorization.k8s.io",
    "kind": "User",
    "name": "admin"
  }]
}`

    aJob = `
{
  "apiVersion": "batch/v1",
  "kind": "Job",
  "metadata": {
    "name": "pi"%s
  },
  "spec": {
    "template": {
      "metadata": {
        "name": "a",
        "labels": {
          "name": "pijob"
        }
      },
      "spec": {
        "containers": [
          {
            "name": "pi",
            "image": "perl",
            "command": [
              "perl",
              "-Mbignum=bpi",
              "-wle",
              "print bpi(2000)"
            ]
          }
        ],
        "restartPolicy": "Never"
      }
    }
  }
}
`
    podNamespace = `
{
  "apiVersion": "` + testapi.Groups[api.GroupName].GroupVersion().String() + `",
  "kind": "Namespace",
  "metadata": {
    "name": "pod-namespace"%s
  }
}
`
    jobNamespace = `
{
  "apiVersion": "` + testapi.Groups[api.GroupName].GroupVersion().String() + `",
  "kind": "Namespace",
  "metadata": {
    "name": "job-namespace"%s
  }
}
`
    forbiddenNamespace = `
{
  "apiVersion": "` + testapi.Groups[api.GroupName].GroupVersion().String() + `",
  "kind": "Namespace",
  "metadata": {
    "name": "forbidden-namespace"%s
  }
}
`
)

// Declare some PolicyRules beforehand.
var (
    ruleAllowAll  = rbacapi.NewRule("*").Groups("*").Resources("*").RuleOrDie()
    ruleReadPods  = rbacapi.NewRule("list", "get", "watch").Groups("").Resources("pods").RuleOrDie()
    ruleWriteJobs = rbacapi.NewRule("*").Groups("batch").Resources("*").RuleOrDie()
)

func TestRBAC(t *testing.T) {
    superUser := "admin/system:masters"

    tests := []struct {
        bootstrapRoles bootstrapRoles

        requests []request
    }{
        {
            bootstrapRoles: bootstrapRoles{
                clusterRoles: []rbacapi.ClusterRole{
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "allow-all"},
                        Rules:      []rbacapi.PolicyRule{ruleAllowAll},
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "read-pods"},
                        Rules:      []rbacapi.PolicyRule{ruleReadPods},
                    },
                },
                clusterRoleBindings: []rbacapi.ClusterRoleBinding{
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "read-pods"},
                        Subjects: []rbacapi.Subject{
                            {Kind: "User", Name: "pod-reader"},
                        },
                        RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "read-pods"},
                    },
                },
            },
            requests: []request{
                // Create the namespace used later in the test
                {superUser, "POST", "", "namespaces", "", "", podNamespace, http.StatusCreated},

                {superUser, "GET", "", "pods", "", "", "", http.StatusOK},
                {superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusNotFound},
                {superUser, "POST", "", "pods", "pod-namespace", "", aPod, http.StatusCreated},
                {superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusOK},

                {"bob", "GET", "", "pods", "", "", "", http.StatusForbidden},
                {"bob", "GET", "", "pods", "pod-namespace", "a", "", http.StatusForbidden},

                {"pod-reader", "GET", "", "pods", "", "", "", http.StatusOK},
                {"pod-reader", "POST", "", "pods", "pod-namespace", "", aPod, http.StatusForbidden},
            },
        },
        {
            bootstrapRoles: bootstrapRoles{
                clusterRoles: []rbacapi.ClusterRole{
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "write-jobs"},
                        Rules:      []rbacapi.PolicyRule{ruleWriteJobs},
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "create-rolebindings"},
                        Rules: []rbacapi.PolicyRule{
                            rbacapi.NewRule("create").Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
                        },
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "bind-any-clusterrole"},
                        Rules: []rbacapi.PolicyRule{
                            rbacapi.NewRule("bind").Groups("rbac.authorization.k8s.io").Resources("clusterroles").RuleOrDie(),
                        },
                    },
                },
                clusterRoleBindings: []rbacapi.ClusterRoleBinding{
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "write-jobs"},
                        Subjects:   []rbacapi.Subject{{Kind: "User", Name: "job-writer"}},
                        RoleRef:    rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "create-rolebindings"},
                        Subjects: []rbacapi.Subject{
                            {Kind: "User", Name: "job-writer"},
                            {Kind: "User", Name: "nonescalating-rolebinding-writer"},
                            {Kind: "User", Name: "any-rolebinding-writer"},
                        },
                        RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "create-rolebindings"},
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "bind-any-clusterrole"},
                        Subjects:   []rbacapi.Subject{{Kind: "User", Name: "any-rolebinding-writer"}},
                        RoleRef:    rbacapi.RoleRef{Kind: "ClusterRole", Name: "bind-any-clusterrole"},
                    },
                },
                roleBindings: []rbacapi.RoleBinding{
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "write-jobs", Namespace: "job-namespace"},
                        Subjects:   []rbacapi.Subject{{Kind: "User", Name: "job-writer-namespace"}},
                        RoleRef:    rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "create-rolebindings", Namespace: "job-namespace"},
                        Subjects: []rbacapi.Subject{
                            {Kind: "User", Name: "job-writer-namespace"},
                            {Kind: "User", Name: "any-rolebinding-writer-namespace"},
                        },
                        RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "create-rolebindings"},
                    },
                    {
                        ObjectMeta: metav1.ObjectMeta{Name: "bind-any-clusterrole", Namespace: "job-namespace"},
                        Subjects:   []rbacapi.Subject{{Kind: "User", Name: "any-rolebinding-writer-namespace"}},
                        RoleRef:    rbacapi.RoleRef{Kind: "ClusterRole", Name: "bind-any-clusterrole"},
                    },
                },
            },
            requests: []request{
                // Create the namespaces used later in the test
                {superUser, "POST", "", "namespaces", "", "", jobNamespace, http.StatusCreated},
                {superUser, "POST", "", "namespaces", "", "", forbiddenNamespace, http.StatusCreated},

                {"user-with-no-permissions", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusForbidden},
                {"user-with-no-permissions", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusForbidden},

                // job-writer-namespace cannot write to the "forbidden-namespace"
                {"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusForbidden},
                {"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
                {"job-writer-namespace", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusForbidden},
                {"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},

                // job-writer can write to any namespace
                {"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusOK},
                {"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusNotFound},
                {"job-writer", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusCreated},
                {"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusOK},

                {"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "", "", http.StatusOK},
                {"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusNotFound},
                {"job-writer-namespace", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusCreated},
                {"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusOK},

                // cannot bind role anywhere
                {"user-with-no-permissions", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
                // can only bind role in namespace where they have explicit bind permission
                {"any-rolebinding-writer-namespace", "POST", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
                // can only bind role in namespace where they have covering permissions
                {"job-writer-namespace", "POST", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
                {"job-writer-namespace", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusCreated},
                {superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
                // can bind role in any namespace where they have covering permissions
                {"job-writer", "POST", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "", writeJobsRoleBinding, http.StatusCreated},
                {superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "pi", "", http.StatusOK},
                // cannot bind role because they don't have covering permissions
                {"nonescalating-rolebinding-writer", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
                // can bind role because they have explicit bind permission
                {"any-rolebinding-writer", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusCreated},
                {superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
                {"any-rolebinding-writer-namespace", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusCreated},
                {superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
            },
        },
    }

    for i, tc := range tests {
        // Create an API Server.
        masterConfig := framework.NewIntegrationTestMasterConfig()
        masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig)
        masterConfig.GenericConfig.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
            superUser:                          {Name: "admin", Groups: []string{"system:masters"}},
            "any-rolebinding-writer":           {Name: "any-rolebinding-writer"},
            "any-rolebinding-writer-namespace": {Name: "any-rolebinding-writer-namespace"},
            "bob":                              {Name: "bob"},
            "job-writer":                       {Name: "job-writer"},
            "job-writer-namespace":             {Name: "job-writer-namespace"},
            "nonescalating-rolebinding-writer": {Name: "nonescalating-rolebinding-writer"},
            "pod-reader":                       {Name: "pod-reader"},
            "user-with-no-permissions":         {Name: "user-with-no-permissions"},
        }))
        _, s, closeFn := framework.RunAMaster(masterConfig)
        defer closeFn()

        clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}

        // Bootstrap the API Server with the test case's initial roles.
        if err := tc.bootstrapRoles.bootstrap(clientsetForToken(superUser, clientConfig)); err != nil {
            t.Errorf("case %d: failed to apply initial roles: %v", i, err)
            continue
        }
        previousResourceVersion := make(map[string]float64)

        for j, r := range tc.requests {
            testGroup, ok := testapi.Groups[r.apiGroup]
            if !ok {
                t.Errorf("case %d %d: unknown api group %q, %s", i, j, r.apiGroup, r)
                continue
            }
            path := testGroup.ResourcePath(r.resource, r.namespace, r.name)

            var body io.Reader
            if r.body != "" {
                sub := ""
                if r.verb == "PUT" {
                    // For update operations, insert previous resource version
                    if resVersion := previousResourceVersion[getPreviousResourceVersionKey(path, "")]; resVersion != 0 {
                        sub += fmt.Sprintf(",\"resourceVersion\": \"%v\"", resVersion)
                    }
                }
                body = strings.NewReader(fmt.Sprintf(r.body, sub))
            }

            req, err := http.NewRequest(r.verb, s.URL+path, body)
            if err != nil {
                t.Fatalf("failed to create request: %v", err)
            }

            func() {
                reqDump, err := httputil.DumpRequest(req, true)
                if err != nil {
                    t.Fatalf("failed to dump request: %v", err)
                    return
                }

                resp, err := clientForToken(r.token).Do(req)
                if err != nil {
                    t.Errorf("case %d, req %d: failed to make request: %v", i, j, err)
                    return
                }
                defer resp.Body.Close()

                respDump, err := httputil.DumpResponse(resp, true)
                if err != nil {
                    t.Fatalf("failed to dump response: %v", err)
                    return
                }

                if resp.StatusCode != r.expectedStatus {
                    // When debugging is on, dump the entire request and response. Very helpful for
                    // debugging malformed test cases.
                    //
                    // To turn on debugging, use the '-args' flag.
                    //
                    //   go test -v -tags integration -run RBAC -args -v 10
                    //
                    glog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump)
                    t.Errorf("case %d, req %d: %s expected %q got %q", i, j, r, statusCode(r.expectedStatus), statusCode(resp.StatusCode))
                }

                b, _ := ioutil.ReadAll(resp.Body)

                if r.verb == "POST" && (resp.StatusCode/100) == 2 {
                    // For successful create operations, extract resourceVersion
                    id, currentResourceVersion, err := parseResourceVersion(b)
                    if err == nil {
                        key := getPreviousResourceVersionKey(path, id)
                        previousResourceVersion[key] = currentResourceVersion
                    } else {
                        t.Logf("error in trying to extract resource version: %s", err)
                    }
                }
            }()
        }
    }
}
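
// Editor's sketch (not in the original file): the previousResourceVersion map
// lets PUT bodies carry the resourceVersion captured from an earlier POST,
// which is how the request table above can exercise updates without conflicts.
// The substitution pattern, assuming the same %s placeholder convention used
// by the JSON fixtures:
//
//	sub := fmt.Sprintf(",\"resourceVersion\": \"%v\"", resVersion)
//	body := strings.NewReader(fmt.Sprintf(r.body, sub))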

func TestBootstrapping(t *testing.T) {
    superUser := "admin/system:masters"

    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig)
    masterConfig.GenericConfig.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
        superUser: {Name: "admin", Groups: []string{"system:masters"}},
    }))
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()

    clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})

    watcher, err := clientset.Rbac().ClusterRoles().Watch(metav1.ListOptions{ResourceVersion: "0"})
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    _, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
        if event.Type != watch.Added {
            return false, nil
        }
        return true, nil
    })
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    clusterRoles, err := clientset.Rbac().ClusterRoles().List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if len(clusterRoles.Items) == 0 {
        t.Fatalf("missing cluster roles")
    }

    for _, clusterRole := range clusterRoles.Items {
        if clusterRole.Name == "cluster-admin" {
            return
        }
    }

    t.Errorf("missing cluster-admin: %v", clusterRoles)

    healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw()
    if err != nil {
        t.Error(err)
    }
    t.Errorf("error bootstrapping roles: %s", string(healthBytes))
}
25
vendor/k8s.io/kubernetes/test/integration/benchmark-controller.json
generated
vendored
Normal file
@ -0,0 +1,25 @@
{
  "kind": "ReplicationController",
  "apiVersion": "v1",
  "metadata": {
    "name": "test-controller",
    "namespace": "test",
    "labels": {"name": "test-controller"}
  },
  "spec": {
    "replicas": 0,
    "selector": {"name": "test-pod"},
    "template": {
      "metadata": {
        "namespace": "test",
        "labels": {"name": "test-pod"}
      },
      "spec": {
        "containers": [{
          "name": "test-container",
          "image": "gcr.io/google_containers/pause-amd64:3.0"
        }]
      }
    }
  }
}
53
vendor/k8s.io/kubernetes/test/integration/client/BUILD
generated
vendored
Normal file
@ -0,0 +1,53 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "client_test.go",
        "dynamic_client_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/client",
    tags = ["integration"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/version:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
837
vendor/k8s.io/kubernetes/test/integration/client/client_test.go
generated
vendored
Normal file
@ -0,0 +1,837 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
    "fmt"
    "log"
    "reflect"
    rt "runtime"
    "sync"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/version"
    e2e "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/integration/framework"
)

func TestClient(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    ns := framework.CreateTestingNamespace("client", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    info, err := client.Discovery().ServerVersion()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
        t.Errorf("expected %#v, got %#v", e, a)
    }

    pods, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if len(pods.Items) != 0 {
        t.Errorf("expected no pods, got %#v", pods)
    }

    // get a validation error
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "test",
            Namespace:    ns.Name,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: "test",
                },
            },
        },
    }

    got, err := client.Core().Pods(ns.Name).Create(pod)
    if err == nil {
        t.Fatalf("unexpected non-error: %v", got)
    }

    // get a created pod
    pod.Spec.Containers[0].Image = "an-image"
    got, err = client.Core().Pods(ns.Name).Create(pod)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if got.Name == "" {
        t.Errorf("unexpected empty pod Name %v", got)
    }

    // pod is shown, but not scheduled
    pods, err = client.Core().Pods(ns.Name).List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if len(pods.Items) != 1 {
        t.Errorf("expected one pod, got %#v", pods)
    }
    actual := pods.Items[0]
    if actual.Name != got.Name {
        t.Errorf("expected pod %#v, got %#v", got, actual)
    }
    if actual.Spec.NodeName != "" {
        t.Errorf("expected pod to be unscheduled, got %#v", actual)
    }
}

func TestAtomicPut(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

    c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    ns := framework.CreateTestingNamespace("atomic-put", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    rcBody := v1.ReplicationController{
        TypeMeta: metav1.TypeMeta{
            APIVersion: c.Core().RESTClient().APIVersion().String(),
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      "atomicrc",
            Namespace: ns.Name,
            Labels: map[string]string{
                "name": "atomicrc",
            },
        },
        Spec: v1.ReplicationControllerSpec{
            Replicas: func(i int32) *int32 { return &i }(0),
            Selector: map[string]string{
                "foo": "bar",
            },
            Template: &v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{
                        "foo": "bar",
                    },
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {Name: "name", Image: "image"},
                    },
                },
            },
        },
    }
    rcs := c.Core().ReplicationControllers(ns.Name)
    rc, err := rcs.Create(&rcBody)
    if err != nil {
        t.Fatalf("Failed creating atomicRC: %v", err)
    }
    testLabels := labels.Set{
        "foo": "bar",
    }
    for i := 0; i < 5; i++ {
        // a: z, b: y, etc...
        testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)})
    }
    var wg sync.WaitGroup
    wg.Add(len(testLabels))
    for label, value := range testLabels {
        go func(l, v string) {
            defer wg.Done()
            for {
                tmpRC, err := rcs.Get(rc.Name, metav1.GetOptions{})
                if err != nil {
                    t.Errorf("Error getting atomicRC: %v", err)
                    continue
                }
                if tmpRC.Spec.Selector == nil {
                    tmpRC.Spec.Selector = map[string]string{l: v}
                    tmpRC.Spec.Template.Labels = map[string]string{l: v}
                } else {
                    tmpRC.Spec.Selector[l] = v
                    tmpRC.Spec.Template.Labels[l] = v
                }
                tmpRC, err = rcs.Update(tmpRC)
                if err != nil {
                    if apierrors.IsConflict(err) {
                        // This is what we expect.
                        continue
                    }
                    t.Errorf("Unexpected error putting atomicRC: %v", err)
                    continue
                }
                return
            }
        }(label, value)
    }
    wg.Wait()
    rc, err = rcs.Get(rc.Name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Failed getting atomicRC after writers are complete: %v", err)
    }
    if !reflect.DeepEqual(testLabels, labels.Set(rc.Spec.Selector)) {
        t.Errorf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, rc.Spec.Selector)
    }
}
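
// Editor's note (not part of the original file): TestAtomicPut relies on
// optimistic concurrency — every writer re-reads the object and retries on a
// 409 Conflict until its label lands. A minimal sketch of that
// get-modify-update loop, assuming an RC client `rcs` like the one above:
//
//	for {
//		cur, err := rcs.Get("atomicrc", metav1.GetOptions{})
//		if err != nil {
//			continue // transient read failure; retry
//		}
//		cur.Spec.Selector["k"] = "v"
//		if _, err = rcs.Update(cur); apierrors.IsConflict(err) {
//			continue // stale resourceVersion; re-read and retry
//		}
//		break
//	}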

func TestPatch(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

    c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    ns := framework.CreateTestingNamespace("patch", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    name := "patchpod"
    resource := "pods"
    podBody := v1.Pod{
        TypeMeta: metav1.TypeMeta{
            APIVersion: c.Core().RESTClient().APIVersion().String(),
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: ns.Name,
            Labels:    map[string]string{},
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {Name: "name", Image: "image"},
            },
        },
    }
    pods := c.Core().Pods(ns.Name)
    pod, err := pods.Create(&podBody)
    if err != nil {
        t.Fatalf("Failed creating patchpods: %v", err)
    }

    patchBodies := map[schema.GroupVersion]map[types.PatchType]struct {
        AddLabelBody        []byte
        RemoveLabelBody     []byte
        RemoveAllLabelsBody []byte
    }{
        v1.SchemeGroupVersion: {
            types.JSONPatchType: {
                []byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`),
                []byte(`[{"op":"remove","path":"/metadata/labels/foo"}]`),
                []byte(`[{"op":"remove","path":"/metadata/labels"}]`),
            },
            types.MergePatchType: {
                []byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
                []byte(`{"metadata":{"labels":{"foo":null}}}`),
                []byte(`{"metadata":{"labels":null}}`),
            },
            types.StrategicMergePatchType: {
                []byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
                []byte(`{"metadata":{"labels":{"foo":null}}}`),
                []byte(`{"metadata":{"labels":{"$patch":"replace"}}}`),
            },
        },
    }

    pb := patchBodies[c.Core().RESTClient().APIVersion()]

    execPatch := func(pt types.PatchType, body []byte) error {
        result := c.Core().RESTClient().Patch(pt).
            Resource(resource).
            Namespace(ns.Name).
            Name(name).
            Body(body).
            Do()
        if result.Error() != nil {
            return result.Error()
        }

        // trying to chase flakes, this should give us resource versions of objects as we step through
        jsonObj, err := result.Raw()
        if err != nil {
            t.Log(err)
        } else {
            t.Logf("%v", string(jsonObj))
        }

        obj, err := result.Get()
        if err != nil {
            t.Fatal(err)
        }
        metadata, err := meta.Accessor(obj)
        if err != nil {
            t.Fatal(err)
        }
        // This call waits for the resourceVersion to be reached in the cache before returning.
        // We need to do this because the patch gets its initial object from the storage, and
        // the cache serves that. If it is out of date, then our initial patch is applied to an
        // old resource version, which conflicts; the updated object then shows a conflicting
        // diff, which permanently fails the patch. This gives the patch expected stability
        // without retrying on an unknown number of conflicts below in the test.
        if _, err := c.Core().Pods(ns.Name).Get(name, metav1.GetOptions{ResourceVersion: metadata.GetResourceVersion()}); err != nil {
            t.Fatal(err)
        }

        return nil
    }

    for k, v := range pb {
        // add label
        err := execPatch(k, v.AddLabelBody)
        if err != nil {
            t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
        }
        pod, err = pods.Get(name, metav1.GetOptions{})
        if err != nil {
            t.Fatalf("Failed getting patchpod: %v", err)
        }
        if len(pod.Labels) != 2 || pod.Labels["foo"] != "bar" || pod.Labels["baz"] != "qux" {
            t.Errorf("Failed updating patchpod with patch type %s: labels are: %v", k, pod.Labels)
        }

        // remove one label
        err = execPatch(k, v.RemoveLabelBody)
        if err != nil {
            t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
        }
        pod, err = pods.Get(name, metav1.GetOptions{})
        if err != nil {
            t.Fatalf("Failed getting patchpod: %v", err)
        }
        if len(pod.Labels) != 1 || pod.Labels["baz"] != "qux" {
            t.Errorf("Failed updating patchpod with patch type %s: labels are: %v", k, pod.Labels)
        }

        // remove all labels
        err = execPatch(k, v.RemoveAllLabelsBody)
        if err != nil {
            t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
        }
        pod, err = pods.Get(name, metav1.GetOptions{})
        if err != nil {
            t.Fatalf("Failed getting patchpod: %v", err)
        }
        if pod.Labels != nil {
            t.Errorf("Failed remove all labels from patchpod with patch type %s: %v", k, pod.Labels)
        }
    }
}
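
// Editor's note (not part of the original file): the three patch types above
// express the same label edits differently — JSON Patch as an operation list,
// Merge Patch with null-to-delete semantics, and Strategic Merge Patch with
// directives such as "$patch":"replace". For example, removing only the "foo"
// label, as taken from the table above:
//
//	JSON Patch:            [{"op":"remove","path":"/metadata/labels/foo"}]
//	Merge Patch:           {"metadata":{"labels":{"foo":null}}}
//	Strategic Merge Patch: {"metadata":{"labels":{"foo":null}}}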

func TestPatchWithCreateOnUpdate(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

    c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    ns := framework.CreateTestingNamespace("patch-with-create", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    endpointTemplate := &v1.Endpoints{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "patchendpoint",
            Namespace: ns.Name,
        },
        Subsets: []v1.EndpointSubset{
            {
                Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
                Ports:     []v1.EndpointPort{{Port: 80, Protocol: v1.ProtocolTCP}},
            },
        },
    }

    patchEndpoint := func(json []byte) (runtime.Object, error) {
        return c.Core().RESTClient().Patch(types.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
    }

    // Make sure patch doesn't get to CreateOnUpdate
    {
        endpointJSON, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
        if err != nil {
            t.Fatalf("Failed creating endpoint JSON: %v", err)
        }
        if obj, err := patchEndpoint(endpointJSON); !apierrors.IsNotFound(err) {
            t.Errorf("Expected notfound creating from patch, got error=%v and object: %#v", err, obj)
        }
    }

    // Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version
    createdEndpoint, err := c.Core().Endpoints(ns.Name).Update(endpointTemplate)
    if err != nil {
        t.Fatalf("Failed creating endpoint: %v", err)
    }

    // Make sure identity patch is accepted
    {
        endpointJSON, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), createdEndpoint)
        if err != nil {
            t.Fatalf("Failed creating endpoint JSON: %v", err)
        }
        if _, err := patchEndpoint(endpointJSON); err != nil {
            t.Errorf("Failed patching endpoint: %v", err)
        }
    }

    // Make sure patch complains about a mismatched resourceVersion
    {
        endpointTemplate.Name = ""
        endpointTemplate.UID = ""
        endpointTemplate.ResourceVersion = "1"
        endpointJSON, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
        if err != nil {
            t.Fatalf("Failed creating endpoint JSON: %v", err)
        }
        if _, err := patchEndpoint(endpointJSON); !apierrors.IsConflict(err) {
            t.Errorf("Expected conflict error, got %#v", err)
        }
    }

    // Make sure patch complains about mutating the UID
    {
        endpointTemplate.Name = ""
        endpointTemplate.UID = "abc"
        endpointTemplate.ResourceVersion = ""
        endpointJSON, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
        if err != nil {
            t.Fatalf("Failed creating endpoint JSON: %v", err)
        }
        if _, err := patchEndpoint(endpointJSON); !apierrors.IsInvalid(err) {
            t.Errorf("Expected invalid error, got %#v", err)
        }
    }

    // Make sure patch complains about a mismatched name
    {
        endpointTemplate.Name = "changedname"
        endpointTemplate.UID = ""
        endpointTemplate.ResourceVersion = ""
        endpointJSON, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
        if err != nil {
            t.Fatalf("Failed creating endpoint JSON: %v", err)
        }
        if _, err := patchEndpoint(endpointJSON); !apierrors.IsBadRequest(err) {
            t.Errorf("Expected bad request error, got %#v", err)
        }
    }

    // Make sure patch containing originally submitted JSON is accepted
    {
        endpointTemplate.Name = ""
        endpointTemplate.UID = ""
        endpointTemplate.ResourceVersion = ""
        endpointJSON, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
        if err != nil {
            t.Fatalf("Failed creating endpoint JSON: %v", err)
        }
        if _, err := patchEndpoint(endpointJSON); err != nil {
            t.Errorf("Failed patching endpoint: %v", err)
        }
    }
}

func TestAPIVersions(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

    c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    clientVersion := c.Core().RESTClient().APIVersion().String()
    g, err := c.Discovery().ServerGroups()
    if err != nil {
        t.Fatalf("Failed to get api versions: %v", err)
    }
    versions := metav1.ExtractGroupVersions(g)

    // Verify that the server supports the API version used by the client.
    for _, version := range versions {
        if version == clientVersion {
            return
        }
    }
    t.Errorf("Server does not support APIVersion used by client. Server supported APIVersions: '%v', client APIVersion: '%v'", versions, clientVersion)
}

func TestSingleWatch(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

    ns := framework.CreateTestingNamespace("single-watch", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    mkEvent := func(i int) *v1.Event {
        name := fmt.Sprintf("event-%v", i)
        return &v1.Event{
            ObjectMeta: metav1.ObjectMeta{
                Namespace: ns.Name,
                Name:      name,
            },
            InvolvedObject: v1.ObjectReference{
                Namespace: ns.Name,
                Name:      name,
            },
            Reason: fmt.Sprintf("event %v", i),
        }
    }

    rv1 := ""
    for i := 0; i < 10; i++ {
        event := mkEvent(i)
        got, err := client.Core().Events(ns.Name).Create(event)
        if err != nil {
            t.Fatalf("Failed creating event %#q: %v", event, err)
        }
        if rv1 == "" {
            rv1 = got.ResourceVersion
            if rv1 == "" {
                t.Fatal("did not get a resource version.")
            }
        }
        t.Logf("Created event %#v", got.ObjectMeta)
    }

    w, err := client.Core().RESTClient().Get().
        Namespace(ns.Name).
        Resource("events").
        VersionedParams(&metav1.ListOptions{
            ResourceVersion: rv1,
            Watch:           true,
            FieldSelector:   fields.OneTermEqualSelector("metadata.name", "event-9").String(),
        }, metav1.ParameterCodec).
        Watch()

    if err != nil {
        t.Fatalf("Failed watch: %v", err)
    }
    defer w.Stop()

    select {
    case <-time.After(wait.ForeverTestTimeout):
        t.Fatalf("watch took longer than %s", wait.ForeverTestTimeout.String())
    case got, ok := <-w.ResultChan():
        if !ok {
            t.Fatal("Watch channel closed unexpectedly.")
        }

        // We expect to see an ADD of event-9 and only event-9. (This
        // catches a bug where all the events would have been sent down
        // the channel.)
        if e, a := watch.Added, got.Type; e != a {
            t.Errorf("Wanted %v, got %v", e, a)
        }
        switch o := got.Object.(type) {
        case *v1.Event:
            if e, a := "event-9", o.Name; e != a {
                t.Errorf("Wanted %v, got %v", e, a)
            }
        default:
            t.Fatalf("Unexpected watch event containing object %#q", got)
        }
    }
}
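
// Editor's sketch (not in the original file): the watch above starts from the
// resourceVersion of the first created event and narrows the stream with a
// field selector, so only "event-9" is delivered. An equivalent typed call,
// assuming the same client and namespace:
//
//	opts := metav1.ListOptions{
//		ResourceVersion: rv1,
//		FieldSelector:   fields.OneTermEqualSelector("metadata.name", "event-9").String(),
//	}
//	w, err := client.Core().Events(ns.Name).Watch(opts)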
|
||||
|
||||
func TestMultiWatch(t *testing.T) {
|
||||
// Disable this test as long as it demonstrates a problem.
|
||||
// TODO: Reenable this test when we get #6059 resolved.
|
||||
t.Skip()
|
||||
|
||||
const watcherCount = 50
|
||||
	rt.GOMAXPROCS(watcherCount)

	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	ns := framework.CreateTestingNamespace("multi-watch", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	dummyEvent := func(i int) *v1.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &v1.Event{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns.Name,
			},
			InvolvedObject: v1.ObjectReference{
				Name:      name,
				Namespace: ns.Name,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "pause",
					Image: e2e.GetPauseImageName(client),
				}},
			},
		})

		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}
		go func(name, rv string) {
			options := metav1.ListOptions{
				LabelSelector:   labels.Set{"watchlabel": name}.AsSelector().String(),
				ResourceVersion: rv,
			}
			w, err := client.Core().Pods(ns.Name).Watch(options)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Core().Events(ns.Name).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}
	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: name,
						},
						Spec: v1.PodSpec{
							Containers: []v1.Container{{
								Name:  "nothing",
								Image: e2e.GetPauseImageName(client),
							}},
						},
					})

					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 1000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Core().Pods(ns.Name).Get(name, metav1.GetOptions{})
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")
	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	t.Errorf("durations: %v", dur)
}

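// runSelfLinkTestOnNamespace creates a pod in the given namespace, then
// verifies that the selfLink reported for the pod, for the pod list, and for
// each matching list item can be used to GET the object back from the API server.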
func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) {
	podBody := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "selflinktest",
			Namespace: namespace,
			Labels: map[string]string{
				"name": "selflinktest",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "name", Image: "image"},
			},
		},
	}
	pod, err := c.CoreV1().Pods(namespace).Create(&podBody)
	if err != nil {
		t.Fatalf("Failed creating selflinktest pod: %v", err)
	}
	if err = c.CoreV1().RESTClient().Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil {
		t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
	}

	podList, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		t.Errorf("Failed listing pods: %v", err)
	}

	if err = c.CoreV1().RESTClient().Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil {
		t.Errorf("Failed listing pods with supplied self link '%v': %v", podList.SelfLink, err)
	}

	found := false
	for i := range podList.Items {
		item := &podList.Items[i]
		if item.Name != "selflinktest" {
			continue
		}
		found = true
		err = c.CoreV1().RESTClient().Get().RequestURI(item.SelfLink).Do().Into(pod)
		if err != nil {
			t.Errorf("Failed listing pod with supplied self link '%v': %v", item.SelfLink, err)
		}
		break
	}
	if !found {
		t.Errorf("never found selflinktest pod in namespace %s", namespace)
	}
}

func TestSelfLinkOnNamespace(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	ns := framework.CreateTestingNamespace("selflink", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	runSelfLinkTestOnNamespace(t, c, ns.Name)
}
156
vendor/k8s.io/kubernetes/test/integration/client/dynamic_client_test.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/test/integration/framework"
)

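// TestDynamicClient creates a pod with the typed client, then lists, gets,
// and deletes it through the dynamic client, checking that both clients
// observe the same object.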
func TestDynamicClient(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	ns := framework.CreateTestingNamespace("dynamic-client", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	gv := testapi.Groups[v1.GroupName].GroupVersion()
	config := &restclient.Config{
		Host:          s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: gv},
	}

	client := clientset.NewForConfigOrDie(config)
	dynamicClient, err := dynamic.NewClient(config)
	if err != nil {
		t.Fatalf("unexpected error creating dynamic client: %v", err)
	}

	// Find the Pod resource
	resources, err := client.Discovery().ServerResourcesForGroupVersion(gv.String())
	if err != nil {
		t.Fatalf("unexpected error listing resources: %v", err)
	}

	var resource metav1.APIResource
	for _, r := range resources.APIResources {
		if r.Kind == "Pod" {
			resource = r
			break
		}
	}

	if len(resource.Name) == 0 {
		t.Fatalf("could not find the pod resource in group/version %q", gv.String())
	}

	// Create a Pod with the normal client
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "test",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "test",
					Image: "test-image",
				},
			},
		},
	}

	actual, err := client.Core().Pods(ns.Name).Create(pod)
	if err != nil {
		t.Fatalf("unexpected error when creating pod: %v", err)
	}

	// check dynamic list
	obj, err := dynamicClient.Resource(&resource, ns.Name).List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error when listing pods: %v", err)
	}
	unstructuredList, ok := obj.(*unstructured.UnstructuredList)
	if !ok {
		t.Fatalf("expected *unstructured.UnstructuredList, got %#v", obj)
	}

	if len(unstructuredList.Items) != 1 {
		t.Fatalf("expected one pod, got %d", len(unstructuredList.Items))
	}

	got, err := unstructuredToPod(&unstructuredList.Items[0])
	if err != nil {
		t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
	}

	if !reflect.DeepEqual(actual, got) {
		t.Fatalf("unexpected pod in list. wanted %#v, got %#v", actual, got)
	}

	// check dynamic get
	unstruct, err := dynamicClient.Resource(&resource, ns.Name).Get(actual.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error when getting pod %q: %v", actual.Name, err)
	}

	got, err = unstructuredToPod(unstruct)
	if err != nil {
		t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
	}

	if !reflect.DeepEqual(actual, got) {
		t.Fatalf("unexpected pod in list. wanted %#v, got %#v", actual, got)
	}

	// delete the pod dynamically
	err = dynamicClient.Resource(&resource, ns.Name).Delete(actual.Name, nil)
	if err != nil {
		t.Fatalf("unexpected error when deleting pod: %v", err)
	}

	list, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error when listing pods: %v", err)
	}

	if len(list.Items) != 0 {
		t.Fatalf("expected zero pods, got %d", len(list.Items))
	}
}

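// unstructuredToPod round-trips an Unstructured object through JSON into a
// typed v1.Pod, clearing Kind and APIVersion so the result can be compared
// with objects returned by the typed client.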
func unstructuredToPod(obj *unstructured.Unstructured) (*v1.Pod, error) {
	json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
	if err != nil {
		return nil, err
	}
	pod := new(v1.Pod)
	err = runtime.DecodeInto(testapi.Default.Codec(), json, pod)
	pod.Kind = ""
	pod.APIVersion = ""
	return pod, err
}
27
vendor/k8s.io/kubernetes/test/integration/client/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
39
vendor/k8s.io/kubernetes/test/integration/configmap/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "configmap_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/configmap",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
124
vendor/k8s.io/kubernetes/test/integration/configmap/configmap_test.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

// This file tests use of the configMap API resource.

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/test/integration"
	"k8s.io/kubernetes/test/integration/framework"
)

// TestConfigMap tests apiserver-side behavior of creation of ConfigMaps and pods that consume them.
func TestConfigMap(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	ns := framework.CreateTestingNamespace("config-map", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	DoTestConfigMap(t, client, ns)
}

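// DoTestConfigMap creates a ConfigMap with three keys, then creates a pod
// whose container consumes each key through an env var ConfigMapKeyRef.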
func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
	cfg := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "configmap",
			Namespace: ns.Name,
		},
		Data: map[string]string{
			"data-1": "value-1",
			"data-2": "value-2",
			"data-3": "value-3",
		},
	}

	if _, err := client.CoreV1().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil {
		t.Errorf("unable to create test configMap: %v", err)
	}
	defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "XXX",
			Namespace: ns.Name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
					Env: []v1.EnvVar{
						{
							Name: "CONFIG_DATA_1",
							ValueFrom: &v1.EnvVarSource{
								ConfigMapKeyRef: &v1.ConfigMapKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: "configmap",
									},
									Key: "data-1",
								},
							},
						},
						{
							Name: "CONFIG_DATA_2",
							ValueFrom: &v1.EnvVarSource{
								ConfigMapKeyRef: &v1.ConfigMapKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: "configmap",
									},
									Key: "data-2",
								},
							},
						}, {
							Name: "CONFIG_DATA_3",
							ValueFrom: &v1.EnvVarSource{
								ConfigMapKeyRef: &v1.ConfigMapKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: "configmap",
									},
									Key: "data-3",
								},
							},
						},
					},
				},
			},
		},
	}

	pod.ObjectMeta.Name = "uses-configmap"
	if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}
	defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}

func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
	if err := c.CoreV1().ConfigMaps(ns).Delete(name, nil); err != nil {
		t.Errorf("unable to delete ConfigMap %v: %v", name, err)
	}
}
27
vendor/k8s.io/kubernetes/test/integration/configmap/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
48
vendor/k8s.io/kubernetes/test/integration/daemonset/BUILD
generated
vendored
Normal file
@ -0,0 +1,48 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "daemonset_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/daemonset",
    tags = ["integration"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/daemon:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
9
vendor/k8s.io/kubernetes/test/integration/daemonset/OWNERS
generated
vendored
Executable file
@ -0,0 +1,9 @@
approvers:
- mikedanese
- kow3ns
reviewers:
- mikedanese
- kargakis
- lukaszo
- janetkuo
- kow3ns
395
vendor/k8s.io/kubernetes/test/integration/daemonset/daemonset_test.go
generated
vendored
Normal file
@ -0,0 +1,395 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemonset

import (
	"fmt"
	"net/http/httptest"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
	extensionsv1beta1typed "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller/daemon"
	"k8s.io/kubernetes/pkg/util/metrics"
	"k8s.io/kubernetes/test/integration/framework"
)

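// setup starts a master and constructs a DaemonSets controller wired to a
// shared informer factory; callers own the returned CloseFunc and must start
// the informers and run the controller themselves.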
func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, server, closeFn := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: server.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error in creating clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
	metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
	dc, err := daemon.NewDaemonSetsController(
		informers.Extensions().V1beta1().DaemonSets(),
		informers.Apps().V1beta1().ControllerRevisions(),
		informers.Core().V1().Pods(),
		informers.Core().V1().Nodes(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
	)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}

	return server, closeFn, dc, informers, clientSet
}

func testLabels() map[string]string {
	return map[string]string{"name": "test"}
}

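// newDaemonSet returns an extensions/v1beta1 DaemonSet that selects the test
// labels and uses the OnDelete update strategy by default.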
func newDaemonSet(name, namespace string) *v1beta1.DaemonSet {
	two := int32(2)
	return &v1beta1.DaemonSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "DaemonSet",
			APIVersion: "extensions/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: v1beta1.DaemonSetSpec{
			RevisionHistoryLimit: &two,
			Selector:             &metav1.LabelSelector{MatchLabels: testLabels()},
			UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
				Type: v1beta1.OnDeleteDaemonSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: testLabels(),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "foo", Image: "bar"}},
				},
			},
		},
	}
}

func newRollbackStrategy() *v1beta1.DaemonSetUpdateStrategy {
	one := intstr.FromInt(1)
	return &v1beta1.DaemonSetUpdateStrategy{
		Type:          v1beta1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &v1beta1.RollingUpdateDaemonSet{MaxUnavailable: &one},
	}
}

func newOnDeleteStrategy() *v1beta1.DaemonSetUpdateStrategy {
	return &v1beta1.DaemonSetUpdateStrategy{
		Type: v1beta1.OnDeleteDaemonSetStrategyType,
	}
}

func updateStrategies() []*v1beta1.DaemonSetUpdateStrategy {
	return []*v1beta1.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
}

func allocatableResources(memory, cpu string) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceMemory: resource.MustParse(memory),
		v1.ResourceCPU:    resource.MustParse(cpu),
		v1.ResourcePods:   resource.MustParse("100"),
	}
}

func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
	return v1.PodSpec{
		NodeName: nodeName,
		Containers: []v1.Container{
			{
				Name:  "foo",
				Image: "bar",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceMemory: resource.MustParse(memory),
						v1.ResourceCPU:    resource.MustParse(cpu),
					},
				},
			},
		},
	}
}

func newNode(name string, label map[string]string) *v1.Node {
	return &v1.Node{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Node",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Labels:    label,
			Namespace: metav1.NamespaceDefault,
		},
		Status: v1.NodeStatus{
			Conditions:  []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
			Allocatable: v1.ResourceList{v1.ResourcePods: resource.MustParse("100")},
		},
	}
}

func addNodes(nodeClient corev1typed.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {
	for i := startIndex; i < startIndex+numNodes; i++ {
		_, err := nodeClient.Create(newNode(fmt.Sprintf("node-%d", i), label))
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}
	}
}

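// validateDaemonSetPodsAndMarkReady polls until the informer cache holds the
// expected number of pods, verifies each pod carries exactly one DaemonSet
// controller owner reference, and marks any pod that is not yet ready as
// Running/Ready so the controller can observe progress.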
func validateDaemonSetPodsAndMarkReady(
	podClient corev1typed.PodInterface,
	podInformer cache.SharedIndexInformer,
	numberPods int,
	t *testing.T) {
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		if len(objects) != numberPods {
			return false, nil
		}

		for _, object := range objects {
			pod := object.(*v1.Pod)

			ownerReferences := pod.ObjectMeta.OwnerReferences
			if len(ownerReferences) != 1 {
				return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
			}
			controllerRef := ownerReferences[0]
			if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want {
				t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
			}
			if got, want := controllerRef.Kind, "DaemonSet"; got != want {
				t.Errorf("controllerRef.Kind = %q, want %q", got, want)
			}
			if controllerRef.Controller == nil || !*controllerRef.Controller {
				t.Errorf("controllerRef.Controller is not set to true")
			}

			if !podutil.IsPodReady(pod) {
				podCopy := pod.DeepCopy()
				podCopy.Status = v1.PodStatus{
					Phase:      v1.PodRunning,
					Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
				}
				_, err := podClient.UpdateStatus(podCopy)
				if err != nil {
					return false, err
				}
			}
		}

		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

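// validateDaemonSetStatus polls the DaemonSet until Status.NumberReady
// reaches the expected count.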
func validateDaemonSetStatus(
	dsClient extensionsv1beta1typed.DaemonSetInterface,
	dsName string,
	dsNamespace string,
	expectedNumberReady int32,
	t *testing.T) {
	if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		ds, err := dsClient.Get(dsName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ds.Status.NumberReady == expectedNumberReady, nil
	}); err != nil {
		t.Fatal(err)
	}
}

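// validateFailedPlacementEvent waits for a warning event explaining that the
// daemon pod could not be placed, and checks its type and reason.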
func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *testing.T) {
	if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		eventList, err := eventClient.List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		if len(eventList.Items) == 0 {
			return false, nil
		}
		if len(eventList.Items) > 1 {
			t.Errorf("Expected 1 event got %d", len(eventList.Items))
		}
		event := eventList.Items[0]
		if event.Type != v1.EventTypeWarning {
			t.Errorf("Event type expected %s got %s", v1.EventTypeWarning, event.Type)
		}
		if event.Reason != daemon.FailedPlacementReason {
			t.Errorf("Event reason expected %s got %s", daemon.FailedPlacementReason, event.Reason)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

func TestOneNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()
		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		_, err = nodeClient.Create(newNode("single-node", nil))
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
		validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 1, t)

		close(stopCh)
	}
}

func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()
		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		addNodes(nodeClient, 0, 5, nil, t)

		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t)
		validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 5, t)

		close(stopCh)
	}
}

func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()
		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		node := newNode("single-node", nil)
		node.Status.Conditions = []v1.NodeCondition{
			{Type: v1.NodeReady, Status: v1.ConditionFalse},
		}
		_, err = nodeClient.Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
		validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 1, t)

		close(stopCh)
	}
}

func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		// ns is a Namespace object, so its own Namespace field is empty;
		// use ns.Name to scope the event client to the test namespace.
		eventClient := corev1typed.New(clientset.CoreV1().RESTClient()).Events(ns.Name)
		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.Template.Spec = resourcePodSpec("node-with-limited-memory", "120M", "75m")
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		node := newNode("node-with-limited-memory", nil)
		node.Status.Allocatable = allocatableResources("100M", "200m")
		_, err = nodeClient.Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		validateFailedPlacementEvent(eventClient, t)

		close(stopCh)
	}
}
27
vendor/k8s.io/kubernetes/test/integration/daemonset/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemonset

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
44
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/BUILD
generated
vendored
Normal file
@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "defaulttolerationseconds_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/defaulttolerationseconds",
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/core/helper:go_default_library",
        "//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
104
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package defaulttolerationseconds

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/apis/core/helper"
	"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/test/integration/framework"
)

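// TestAdmission creates a pod through an apiserver running the
// DefaultTolerationSeconds admission plugin and verifies that the returned
// pod carries the default 300-second not-ready and unreachable tolerations.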
func TestAdmission(t *testing.T) {
	masterConfig := framework.NewMasterConfig()
	masterConfig.GenericConfig.EnableProfiling = true
	masterConfig.GenericConfig.EnableMetrics = true
	masterConfig.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds()
	_, s, closeFn := framework.RunAMaster(masterConfig)
	defer closeFn()

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	ns := framework.CreateTestingNamespace("default-toleration-seconds", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.Name,
			Name:      "foo",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "test",
					Image: "an-image",
				},
			},
		},
	}

	updatedPod, err := client.Core().Pods(pod.Namespace).Create(&pod)
	if err != nil {
		t.Fatalf("error creating pod: %v", err)
	}

	var defaultSeconds int64 = 300
	nodeNotReady := v1.Toleration{
		Key:               algorithm.TaintNodeNotReady,
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &defaultSeconds,
	}

	nodeUnreachable := v1.Toleration{
		Key:               algorithm.TaintNodeUnreachable,
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &defaultSeconds,
	}

	found := 0
	tolerations := updatedPod.Spec.Tolerations
	for i := range tolerations {
		if found == 2 {
			break
		}
		if tolerations[i].MatchToleration(&nodeNotReady) {
			if helper.Semantic.DeepEqual(tolerations[i], nodeNotReady) {
				found++
				continue
			}
		}
		if tolerations[i].MatchToleration(&nodeUnreachable) {
			if helper.Semantic.DeepEqual(tolerations[i], nodeUnreachable) {
				found++
				continue
			}
		}
	}

	if found != 2 {
		t.Fatalf("unexpected tolerations: %v\n", updatedPod.Spec.Tolerations)
	}
}
27
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package defaulttolerationseconds

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
64
vendor/k8s.io/kubernetes/test/integration/deployment/BUILD
generated
vendored
Normal file
@ -0,0 +1,64 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "deployment_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/deployment",
    library = ":go_default_library",
    tags = ["integration"],
    deps = [
        "//pkg/controller/deployment/util:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = ["util.go"],
    importpath = "k8s.io/kubernetes/test/integration/deployment",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/deployment:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/controller/replicaset:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
7
vendor/k8s.io/kubernetes/test/integration/deployment/OWNERS
generated
vendored
Normal file
@ -0,0 +1,7 @@
reviewers:
- janetkuo
- kargakis
- tnozicka
approvers:
- janetkuo
- kargakis
1070
vendor/k8s.io/kubernetes/test/integration/deployment/deployment_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
27
vendor/k8s.io/kubernetes/test/integration/deployment/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
418
vendor/k8s.io/kubernetes/test/integration/deployment/util.go
generated
vendored
Normal file
@ -0,0 +1,418 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"fmt"
	"net/http/httptest"
	"sync"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller/deployment"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/pkg/controller/replicaset"
	"k8s.io/kubernetes/pkg/util/metrics"
	"k8s.io/kubernetes/test/integration/framework"
	testutil "k8s.io/kubernetes/test/utils"
)

const (
	pollInterval = 100 * time.Millisecond
	pollTimeout  = 60 * time.Second

	fakeContainerName = "fake-name"
	fakeImage         = "fakeimage"
)

var pauseFn = func(update *v1beta1.Deployment) {
	update.Spec.Paused = true
}

var resumeFn = func(update *v1beta1.Deployment) {
	update.Spec.Paused = false
}

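// deploymentTester bundles the testing.T, a clientset, and the Deployment
// under test so the helper methods below can share them.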
type deploymentTester struct {
	t          *testing.T
	c          clientset.Interface
	deployment *v1beta1.Deployment
}

func testLabels() map[string]string {
	return map[string]string{"name": "test"}
}

// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
	return &v1beta1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "extensions/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
			Strategy: v1beta1.DeploymentStrategy{
				Type:          v1beta1.RollingUpdateDeploymentStrategyType,
				RollingUpdate: new(v1beta1.RollingUpdateDeployment),
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: testLabels(),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  fakeContainerName,
							Image: fakeImage,
						},
					},
				},
			},
		},
	}
}

func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
	return &v1beta1.ReplicaSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicaSet",
			APIVersion: "extensions/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
		Spec: v1beta1.ReplicaSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: testLabels(),
			},
			Replicas: &replicas,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: testLabels(),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  fakeContainerName,
							Image: fakeImage,
						},
					},
				},
			},
		},
	}
}

func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback {
	return &v1beta1.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
		RollbackTo:         v1beta1.RollbackConfig{Revision: revision},
	}
}

// dcSetup sets up necessities for Deployment integration test, including master, apiserver, informers, and clientset
func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s, closeFn := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("error in create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)

	metrics.UnregisterMetricAndUntrackRateLimiterUsage("deployment_controller")
	dc, err := deployment.NewDeploymentController(
		informers.Extensions().V1beta1().Deployments(),
		informers.Extensions().V1beta1().ReplicaSets(),
		informers.Core().V1().Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
	)
	if err != nil {
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	rm := replicaset.NewReplicaSetController(
		informers.Extensions().V1beta1().ReplicaSets(),
		informers.Core().V1().Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
		replicaset.BurstReplicas,
	)
	return s, closeFn, rm, dc, informers, clientSet
}

// dcSimpleSetup sets up necessities for Deployment integration test, including master, apiserver,
// and clientset, but not controllers and informers
func dcSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s, closeFn := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("error in create clientset: %v", err)
	}
	return s, closeFn, clientSet
}

// addPodConditionReady sets given pod status to ready at given time
func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
	pod.Status = v1.PodStatus{
		Phase: v1.PodRunning,
		Conditions: []v1.PodCondition{
			{
				Type:               v1.PodReady,
				Status:             v1.ConditionTrue,
				LastTransitionTime: time,
			},
		},
	}
}

func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
	if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
		return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
	}
	return nil
}

func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
	addPodConditionReady(pod, metav1.Now())
	_, err := c.Core().Pods(ns).UpdateStatus(pod)
	return err
}

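// intOrStrP converts an int into a *intstr.IntOrString, for use in
// rolling-update maxSurge/maxUnavailable fields.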
func intOrStrP(num int) *intstr.IntOrString {
	intstr := intstr.FromInt(num)
	return &intstr
}

// markUpdatedPodsReady manually marks updated Deployment pods status to ready,
// until the deployment is complete
func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
	defer wg.Done()

	ns := d.deployment.Namespace
	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		// We're done when the deployment is complete
		if completed, err := d.deploymentComplete(); err != nil {
			return false, err
		} else if completed {
			return true, nil
		}
		// Otherwise, mark remaining pods as ready
		pods, err := d.listUpdatedPods()
		if err != nil {
			d.t.Log(err)
			return false, nil
		}
		d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
		for i := range pods {
			pod := pods[i]
			if podutil.IsPodReady(&pod) {
				continue
			}
			if err = markPodReady(d.c, ns, &pod); err != nil {
				d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
			}
		}
		return false, nil
	})
	if err != nil {
		d.t.Fatalf("failed to mark updated Deployment pods to ready: %v", err)
	}
}

func (d *deploymentTester) deploymentComplete() (bool, error) {
	latest, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return deploymentutil.DeploymentComplete(d.deployment, &latest.Status), nil
}

// Waits for the deployment to complete, and checks that the rolling update
// strategy is not broken at any time.
func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {
	return testutil.WaitForDeploymentCompleteAndCheckRolling(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}

// Waits for the deployment to complete, without checking whether the rolling
// update strategy is broken. The rolling update strategy is enforced only
// during a rolling update, and can be violated in other situations, such as
// shortly after a scaling event or when the deployment is just created.
func (d *deploymentTester) waitForDeploymentComplete() error {
	return testutil.WaitForDeploymentComplete(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}

// waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady waits for the Deployment to complete
// while marking updated Deployment pods as ready at the same time.
// It uses a hard check to make sure the rolling update strategy is not violated at any time.
func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
	var wg sync.WaitGroup

	// Manually mark updated Deployment pods as ready in a separate goroutine
	wg.Add(1)
	go d.markUpdatedPodsReady(&wg)

	// Wait for the Deployment status to complete while Deployment pods are becoming ready
	err := d.waitForDeploymentCompleteAndCheckRolling()
	if err != nil {
		return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
	}

	// Wait for goroutine to finish
	wg.Wait()

	return nil
}

// waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete
// while marking updated Deployment pods as ready at the same time.
func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
	var wg sync.WaitGroup

	// Manually mark updated Deployment pods as ready in a separate goroutine
	wg.Add(1)
	go d.markUpdatedPodsReady(&wg)

	// Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready
	err := d.waitForDeploymentComplete()
	if err != nil {
		return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
	}

	// Wait for goroutine to finish
	wg.Wait()

	return nil
}

func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
	return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}

func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
	if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {
		return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err)
	}
	return nil
}

func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
	deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
	}
	rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.ExtensionsV1beta1())
	if err != nil {
		return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
	}
	return rs, nil
|
||||
}
|
||||
|
||||
func (d *deploymentTester) expectNoNewReplicaSet() error {
|
||||
rs, err := d.getNewReplicaSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rs != nil {
|
||||
return fmt.Errorf("expected deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
|
||||
rs, err := d.getNewReplicaSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rs == nil {
|
||||
return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name)
|
||||
}
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
|
||||
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
|
||||
}
|
||||
|
||||
// waitForDeploymentRollbackCleared waits for deployment either started rolling back or doesn't need to rollback.
|
||||
func (d *deploymentTester) waitForDeploymentRollbackCleared() error {
|
||||
return testutil.WaitForDeploymentRollbackCleared(d.c, d.deployment.Namespace, d.deployment.Name, pollInterval, pollTimeout)
|
||||
}
|
||||
|
||||
// checkDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
|
||||
func (d *deploymentTester) checkDeploymentRevisionAndImage(revision, image string) error {
|
||||
return testutil.CheckDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image)
|
||||
}
|
||||
|
||||
func (d *deploymentTester) waitForDeploymentUpdatedReplicasLTE(minUpdatedReplicas int32) error {
|
||||
return testutil.WaitForDeploymentUpdatedReplicasLTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
|
||||
}
|
||||
|
||||
func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
|
||||
return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout)
|
||||
}
|
||||
|
||||
func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse deployment selector: %v", err)
|
||||
}
|
||||
pods, err := d.c.CoreV1().Pods(d.deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list deployment pods, will retry later: %v", err)
|
||||
}
|
||||
newRS, err := d.getNewReplicaSet()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get new replicaset of deployment %q: %v", d.deployment.Name, err)
|
||||
}
|
||||
if newRS == nil {
|
||||
return nil, fmt.Errorf("unable to find new replicaset of deployment %q", d.deployment.Name)
|
||||
}
|
||||
|
||||
var ownedPods []v1.Pod
|
||||
for _, pod := range pods.Items {
|
||||
rs := metav1.GetControllerOf(&pod)
|
||||
if rs.UID == newRS.UID {
|
||||
ownedPods = append(ownedPods, pod)
|
||||
}
|
||||
}
|
||||
return ownedPods, nil
|
||||
}
|
||||
|
||||
func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
|
||||
return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
|
||||
}
|
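// The helpers above compose into a common test pattern: mutate the Deployment
// spec, then wait for the rollout while pods are marked ready. The sketch below
// is an editor's illustration, not part of the upstream file;
// rollingUpdateSmokeCheck is a hypothetical name, and it assumes a
// *deploymentTester constructed elsewhere in this package.
func rollingUpdateSmokeCheck(d *deploymentTester, image string) error {
	// Trigger a rolling update by changing the pod template image.
	if _, err := d.updateDeployment(func(update *v1beta1.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = image
	}); err != nil {
		return fmt.Errorf("failed updating deployment image: %v", err)
	}
	// Wait for completion with the hard rolling-update check, marking updated
	// pods ready along the way (integration tests have no kubelet to do it).
	return d.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady()
}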
19
vendor/k8s.io/kubernetes/test/integration/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package integration provides integration tests for Kubernetes. Some tests require a
// running etcd or Docker installation on the system.
package integration // import "k8s.io/kubernetes/test/integration"
60
vendor/k8s.io/kubernetes/test/integration/etcd/BUILD
generated
vendored
Normal file
@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "etcd_storage_path_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/etcd",
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//cmd/kube-apiserver/app:go_default_library",
        "//cmd/kube-apiserver/app/options:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/master:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
1146
vendor/k8s.io/kubernetes/test/integration/etcd/etcd_storage_path_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
27
vendor/k8s.io/kubernetes/test/integration/etcd/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
45
vendor/k8s.io/kubernetes/test/integration/evictions/BUILD
generated
vendored
Normal file
@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "evictions_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/evictions",
    tags = ["integration"],
    deps = [
        "//pkg/controller/disruption:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
280
vendor/k8s.io/kubernetes/test/integration/evictions/evictions_test.go
generated
vendored
Normal file
@ -0,0 +1,280 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package evictions

import (
	"fmt"
	"net/http/httptest"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller/disruption"
	"k8s.io/kubernetes/test/integration/framework"
)

const (
	numOfEvictions = 10
)

// TestConcurrentEvictionRequests makes sure that the pod disruption budget (PDB) controller
// is able to handle concurrent eviction requests. Original issue: #37605
func TestConcurrentEvictionRequests(t *testing.T) {
	podNameFormat := "test-pod-%d"

	s, closeFn, rm, informers, clientSet := rmSetup(t)
	defer closeFn()

	ns := framework.CreateTestingNamespace("concurrent-eviction-requests", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go rm.Run(stopCh)
	defer close(stopCh)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Failed to create clientset: %v", err)
	}

	var gracePeriodSeconds int64 = 30
	deleteOption := &metav1.DeleteOptions{
		GracePeriodSeconds: &gracePeriodSeconds,
	}

	// Generate numOfEvictions pods to evict
	for i := 0; i < numOfEvictions; i++ {
		podName := fmt.Sprintf(podNameFormat, i)
		pod := newPod(podName)

		if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil {
			t.Errorf("Failed to create pod: %v", err)
		}

		addPodConditionReady(pod)
		if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil {
			t.Fatal(err)
		}
	}

	waitToObservePods(t, informers.Core().V1().Pods().Informer(), numOfEvictions)

	pdb := newPDB()
	if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil {
		t.Errorf("Failed to create PodDisruptionBudget: %v", err)
	}

	waitPDBStable(t, clientSet, numOfEvictions, ns.Name, pdb.Name)

	var numberPodsEvicted uint32 = 0
	errCh := make(chan error, 3*numOfEvictions)
	var wg sync.WaitGroup
	// spawn numOfEvictions goroutines to concurrently evict the pods
	for i := 0; i < numOfEvictions; i++ {
		wg.Add(1)
		go func(id int, errCh chan error) {
			defer wg.Done()
			podName := fmt.Sprintf(podNameFormat, id)
			eviction := newEviction(ns.Name, podName, deleteOption)

			err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
				e := clientSet.Policy().Evictions(ns.Name).Evict(eviction)
				switch {
				case errors.IsTooManyRequests(e):
					return false, nil
				case errors.IsConflict(e):
					return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e)
				case e == nil:
					return true, nil
				default:
					return false, e
				}
			})

			if err != nil {
				errCh <- err
				// should not return here otherwise we would leak the pod
			}

			_, err = clientSet.CoreV1().Pods(ns.Name).Get(podName, metav1.GetOptions{})
			switch {
			case errors.IsNotFound(err):
				atomic.AddUint32(&numberPodsEvicted, 1)
				// pod was evicted and deleted so return from goroutine immediately
				return
			case err == nil:
				// this shouldn't happen if the pod was evicted successfully
				errCh <- fmt.Errorf("Pod %q is expected to be evicted", podName)
			default:
				errCh <- err
			}

			// delete pod which still exists due to error
			e := clientSet.CoreV1().Pods(ns.Name).Delete(podName, deleteOption)
			if e != nil {
				errCh <- e
			}

		}(i, errCh)
	}

	wg.Wait()

	close(errCh)
	var errList []error
	if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil {
		errList = append(errList, fmt.Errorf("Failed to delete PodDisruptionBudget: %v", err))
	}
	for err := range errCh {
		errList = append(errList, err)
	}
	if len(errList) > 0 {
		t.Fatal(utilerrors.NewAggregate(errList))
	}

	if atomic.LoadUint32(&numberPodsEvicted) != numOfEvictions {
		t.Fatalf("fewer successful evictions than expected: %d", numberPodsEvicted)
	}
}

func newPod(podName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"app": "test-evictions"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
				},
			},
		},
	}
}

func addPodConditionReady(pod *v1.Pod) {
	pod.Status = v1.PodStatus{
		Phase: v1.PodRunning,
		Conditions: []v1.PodCondition{
			{
				Type:   v1.PodReady,
				Status: v1.ConditionTrue,
			},
		},
	}
}

func newPDB() *v1beta1.PodDisruptionBudget {
	return &v1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-pdb",
		},
		Spec: v1beta1.PodDisruptionBudgetSpec{
			MinAvailable: &intstr.IntOrString{
				Type:   intstr.Int,
				IntVal: 0,
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "test-evictions"},
			},
		},
	}
}

func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v1beta1.Eviction {
	return &v1beta1.Eviction{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "Policy/v1beta1",
			Kind:       "Eviction",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      evictionName,
			Namespace: ns,
		},
		DeleteOptions: deleteOption,
	}
}

func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *disruption.DisruptionController, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s, closeFn := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pdb-informers")), resyncPeriod)

	rm := disruption.NewDisruptionController(
		informers.Core().V1().Pods(),
		informers.Policy().V1beta1().PodDisruptionBudgets(),
		informers.Core().V1().ReplicationControllers(),
		informers.Extensions().V1beta1().ReplicaSets(),
		informers.Extensions().V1beta1().Deployments(),
		informers.Apps().V1beta1().StatefulSets(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "disruption-controller")),
	)
	return s, closeFn, rm, informers, clientSet
}

// waitToObservePods waits for the podInformer to observe the pods. Call this function before
// running the RS controller to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
	if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		if len(objects) == podNum {
			return true, nil
		}
		return false, nil
	}); err != nil {
		t.Fatal(err)
	}
}

func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
	if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
		pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pdb.Status.CurrentHealthy != podNum {
			return false, nil
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}
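// The polling switch in the test above is the essential client-side eviction
// pattern: a 429 TooManyRequests response means the PDB currently forbids the
// eviction and the request should be retried later. The sketch below is an
// editor's illustration of that pattern in isolation; evictWithRetry is a
// hypothetical name, not part of the upstream file.
func evictWithRetry(c clientset.Interface, ns string, eviction *v1beta1.Eviction) error {
	return wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
		err := c.Policy().Evictions(ns).Evict(eviction)
		switch {
		case errors.IsTooManyRequests(err):
			// Disruption budget is exhausted right now; retry on the next tick.
			return false, nil
		case err == nil:
			return true, nil
		default:
			return false, err
		}
	})
}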
27
vendor/k8s.io/kubernetes/test/integration/evictions/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package evictions

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
49
vendor/k8s.io/kubernetes/test/integration/examples/BUILD
generated
vendored
Normal file
@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "apiserver_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/examples",
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app:go_default_library",
        "//cmd/kube-apiserver/app/options:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
467
vendor/k8s.io/kubernetes/test/integration/examples/apiserver_test.go
generated
vendored
Normal file
@ -0,0 +1,467 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	genericapiserver "k8s.io/apiserver/pkg/server"
	client "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/cert"
	apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
	kubeaggregatorserver "k8s.io/kube-aggregator/pkg/cmd/server"
	"k8s.io/kubernetes/cmd/kube-apiserver/app"
	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
	sampleserver "k8s.io/sample-apiserver/pkg/cmd/server"
)

var groupVersion = v1alpha1.SchemeGroupVersion

var groupVersionForDiscovery = metav1.GroupVersionForDiscovery{
	GroupVersion: groupVersion.String(),
	Version:      groupVersion.Version,
}

func TestAggregatedAPIServer(t *testing.T) {
	stopCh := make(chan struct{})
	defer close(stopCh)

	certDir, _ := ioutil.TempDir("", "test-integration-apiserver")
	defer os.RemoveAll(certDir)
	_, defaultServiceClusterIPRange, _ := net.ParseCIDR("10.0.0.0/24")
	proxySigningKey, err := cert.NewPrivateKey()
	if err != nil {
		t.Fatal(err)
	}
	proxySigningCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "front-proxy-ca"}, proxySigningKey)
	if err != nil {
		t.Fatal(err)
	}
	proxyCACertFile, _ := ioutil.TempFile(certDir, "proxy-ca.crt")
	if err := ioutil.WriteFile(proxyCACertFile.Name(), cert.EncodeCertPEM(proxySigningCert), 0644); err != nil {
		t.Fatal(err)
	}
	clientSigningKey, err := cert.NewPrivateKey()
	if err != nil {
		t.Fatal(err)
	}
	clientSigningCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "client-ca"}, clientSigningKey)
	if err != nil {
		t.Fatal(err)
	}
	clientCACertFile, _ := ioutil.TempFile(certDir, "client-ca.crt")
	if err := ioutil.WriteFile(clientCACertFile.Name(), cert.EncodeCertPEM(clientSigningCert), 0644); err != nil {
		t.Fatal(err)
	}

	kubeClientConfigValue := atomic.Value{}
	go func() {
		for {
			// always get a fresh port in case something claimed the old one
			kubePort, err := framework.FindFreeLocalPort()
			if err != nil {
				t.Fatal(err)
			}

			kubeAPIServerOptions := options.NewServerRunOptions()
			kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
			kubeAPIServerOptions.SecureServing.BindPort = kubePort
			kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
			kubeAPIServerOptions.InsecureServing.BindPort = 0
			kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
			kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
			kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"}
			kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"}
			kubeAPIServerOptions.Authentication.RequestHeader.ExtraHeaderPrefixes = []string{"X-Remote-Extra-"}
			kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"}
			kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name()
			kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name()
			kubeAPIServerOptions.Authorization.Mode = "RBAC"

			tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
			if err != nil {
				t.Fatal(err)
			}
			kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
			if err != nil {
				t.Fatal(err)
			}
			// Adjust the loopback config for external use (external server name and CA)
			kubeAPIServerClientConfig := rest.CopyConfig(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
			kubeAPIServerClientConfig.CAFile = path.Join(certDir, "apiserver.crt")
			kubeAPIServerClientConfig.CAData = nil
			kubeAPIServerClientConfig.ServerName = ""
			kubeClientConfigValue.Store(kubeAPIServerClientConfig)

			kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
			if err != nil {
				t.Fatal(err)
			}

			if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
				t.Log(err)
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()

	// just use json because everyone speaks it
	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		obj := kubeClientConfigValue.Load()
		if obj == nil {
			return false, nil
		}
		kubeClientConfig := kubeClientConfigValue.Load().(*rest.Config)
		kubeClientConfig.ContentType = ""
		kubeClientConfig.AcceptContentTypes = ""
		kubeClient, err := client.NewForConfig(kubeClientConfig)
		if err != nil {
			// this happens because we race the API server start
			t.Log(err)
			return false, nil
		}
		if _, err := kubeClient.Discovery().ServerVersion(); err != nil {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// after this point we won't be mutating, so the race detector will be fine
	kubeClientConfig := kubeClientConfigValue.Load().(*rest.Config)

	// write a kubeconfig out for starting other API servers with delegated auth. remember, no in-cluster config
	adminKubeConfig := createKubeConfig(kubeClientConfig)
	kubeconfigFile, _ := ioutil.TempFile("", "")
	defer os.Remove(kubeconfigFile.Name())
	clientcmd.WriteToFile(*adminKubeConfig, kubeconfigFile.Name())
	wardleCertDir, _ := ioutil.TempDir("", "test-integration-wardle-server")
	defer os.RemoveAll(wardleCertDir)
	wardlePort := new(int32)

	// start the wardle server to prove we can aggregate it
	go func() {
		for {
			// always get a fresh port in case something claimed the old one
			wardlePortInt, err := framework.FindFreeLocalPort()
			if err != nil {
				t.Fatal(err)
			}
			atomic.StoreInt32(wardlePort, int32(wardlePortInt))
			wardleCmd := sampleserver.NewCommandStartWardleServer(os.Stdout, os.Stderr, stopCh)
			wardleCmd.SetArgs([]string{
				"--bind-address", "127.0.0.1",
				"--secure-port", strconv.Itoa(wardlePortInt),
				"--requestheader-username-headers=X-Remote-User",
				"--requestheader-group-headers=X-Remote-Group",
				"--requestheader-extra-headers-prefix=X-Remote-Extra-",
				"--requestheader-client-ca-file=" + proxyCACertFile.Name(),
				"--requestheader-allowed-names=kube-aggregator",
				"--authentication-kubeconfig", kubeconfigFile.Name(),
				"--authorization-kubeconfig", kubeconfigFile.Name(),
				"--etcd-servers", framework.GetEtcdURL(),
				"--cert-dir", wardleCertDir,
				"--kubeconfig", kubeconfigFile.Name(),
			})
			if err := wardleCmd.Execute(); err != nil {
				t.Log(err)
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()

	wardleClientConfig := rest.AnonymousClientConfig(kubeClientConfig)
	wardleClientConfig.CAFile = path.Join(wardleCertDir, "apiserver.crt")
	wardleClientConfig.CAData = nil
	wardleClientConfig.ServerName = ""
	wardleClientConfig.BearerToken = kubeClientConfig.BearerToken
	var wardleClient client.Interface
	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		wardleClientConfig.Host = fmt.Sprintf("https://127.0.0.1:%d", atomic.LoadInt32(wardlePort))
		wardleClient, err = client.NewForConfig(wardleClientConfig)
		if err != nil {
			// this happens because we race the API server start
			t.Log(err)
			return false, nil
		}
		if _, err := wardleClient.Discovery().ServerVersion(); err != nil {
			t.Log(err)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// start the aggregator
	aggregatorCertDir, _ := ioutil.TempDir("", "test-integration-aggregator")
	defer os.RemoveAll(aggregatorCertDir)
	proxyClientKey, err := cert.NewPrivateKey()
	if err != nil {
		t.Fatal(err)
	}
	proxyClientCert, err := cert.NewSignedCert(
		cert.Config{
			CommonName: "kube-aggregator",
			Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
		proxyClientKey, proxySigningCert, proxySigningKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	proxyClientCertFile, _ := ioutil.TempFile(aggregatorCertDir, "proxy-client.crt")
	proxyClientKeyFile, _ := ioutil.TempFile(aggregatorCertDir, "proxy-client.key")
	if err := ioutil.WriteFile(proxyClientCertFile.Name(), cert.EncodeCertPEM(proxyClientCert), 0600); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(proxyClientKeyFile.Name(), cert.EncodePrivateKeyPEM(proxyClientKey), 0644); err != nil {
		t.Fatal(err)
	}
	aggregatorPort := new(int32)

	go func() {
		for {
			// always get a fresh port in case something claimed the old one
			aggregatorPortInt, err := framework.FindFreeLocalPort()
			if err != nil {
				t.Fatal(err)
			}
			atomic.StoreInt32(aggregatorPort, int32(aggregatorPortInt))
			aggregatorCmd := kubeaggregatorserver.NewCommandStartAggregator(os.Stdout, os.Stderr, stopCh)
			aggregatorCmd.SetArgs([]string{
				"--bind-address", "127.0.0.1",
				"--secure-port", strconv.Itoa(aggregatorPortInt),
				"--requestheader-username-headers", "",
				"--proxy-client-cert-file", proxyClientCertFile.Name(),
				"--proxy-client-key-file", proxyClientKeyFile.Name(),
				"--kubeconfig", kubeconfigFile.Name(),
				"--authentication-kubeconfig", kubeconfigFile.Name(),
				"--authorization-kubeconfig", kubeconfigFile.Name(),
				"--etcd-servers", framework.GetEtcdURL(),
				"--cert-dir", aggregatorCertDir,
			})
			if err := aggregatorCmd.Execute(); err != nil {
				t.Log(err)
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()

	aggregatorClientConfig := rest.AnonymousClientConfig(kubeClientConfig)
	aggregatorClientConfig.CAFile = path.Join(aggregatorCertDir, "apiserver.crt")
	aggregatorClientConfig.CAData = nil
	aggregatorClientConfig.ServerName = ""
	aggregatorClientConfig.BearerToken = kubeClientConfig.BearerToken
	var aggregatorDiscoveryClient client.Interface
	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		aggregatorClientConfig.Host = fmt.Sprintf("https://127.0.0.1:%d", atomic.LoadInt32(aggregatorPort))
		aggregatorDiscoveryClient, err = client.NewForConfig(aggregatorClientConfig)
		if err != nil {
			// this happens if we race the API server for writing the cert
			return false, nil
		}
		if _, err := aggregatorDiscoveryClient.Discovery().ServerVersion(); err != nil {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// now we're finally ready to test. These are what's run by default now
	testAPIGroupList(t, wardleClient.Discovery().RESTClient())
	testAPIGroup(t, wardleClient.Discovery().RESTClient())
	testAPIResourceList(t, wardleClient.Discovery().RESTClient())

	wardleCA, err := ioutil.ReadFile(wardleClientConfig.CAFile)
	if err != nil {
		t.Fatal(err)
	}
	aggregatorClient := aggregatorclient.NewForConfigOrDie(aggregatorClientConfig)
	_, err = aggregatorClient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
		ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.k8s.io"},
		Spec: apiregistrationv1beta1.APIServiceSpec{
			Service: &apiregistrationv1beta1.ServiceReference{
				Namespace: "kube-wardle",
				Name:      "api",
			},
			Group:                "wardle.k8s.io",
			Version:              "v1alpha1",
			CABundle:             wardleCA,
			GroupPriorityMinimum: 200,
			VersionPriority:      200,
		},
	})
	if err != nil {
		t.Fatal(err)
	}

	// this is ugly, but sleep just a little bit so that the watch is probably observed. Since nothing will actually be added to discovery
	// (the service is missing), we don't have an external signal.
	time.Sleep(100 * time.Millisecond)
	if _, err := aggregatorDiscoveryClient.Discovery().ServerResources(); err != nil {
		t.Fatal(err)
	}

	_, err = aggregatorClient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
		ObjectMeta: metav1.ObjectMeta{Name: "v1."},
		Spec: apiregistrationv1beta1.APIServiceSpec{
			// register this as a local service so it doesn't try to look up the default kubernetes service,
			// which will have an unroutable IP address since it's fake.
			Group:                "",
			Version:              "v1",
			GroupPriorityMinimum: 100,
			VersionPriority:      100,
		},
	})
	if err != nil {
		t.Fatal(err)
	}

	// this is ugly, but sleep just a little bit so that the watch is probably observed. Since nothing will actually be added to discovery
	// (the service is missing), we don't have an external signal.
	time.Sleep(100 * time.Millisecond)
	_, err = aggregatorDiscoveryClient.Discovery().ServerResources()
	if err != nil {
		t.Fatal(err)
	}

	// TODO figure out how to turn on enough of services and dns to run more
}

func createKubeConfig(clientCfg *rest.Config) *clientcmdapi.Config {
	clusterNick := "cluster"
	userNick := "user"
	contextNick := "context"

	config := clientcmdapi.NewConfig()

	credentials := clientcmdapi.NewAuthInfo()
	credentials.Token = clientCfg.BearerToken
	credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
	if len(credentials.ClientCertificate) == 0 {
		credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
	}
	credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
	if len(credentials.ClientKey) == 0 {
		credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
	}
	config.AuthInfos[userNick] = credentials

	cluster := clientcmdapi.NewCluster()
	cluster.Server = clientCfg.Host
	cluster.CertificateAuthority = clientCfg.CAFile
	if len(cluster.CertificateAuthority) == 0 {
		cluster.CertificateAuthorityData = clientCfg.CAData
	}
	cluster.InsecureSkipTLSVerify = clientCfg.Insecure
	config.Clusters[clusterNick] = cluster

	context := clientcmdapi.NewContext()
	context.Cluster = clusterNick
	context.AuthInfo = userNick
	config.Contexts[contextNick] = context
	config.CurrentContext = contextNick

	return config
}

func readResponse(client rest.Interface, location string) ([]byte, error) {
	return client.Get().AbsPath(location).DoRaw()
}

func testAPIGroupList(t *testing.T, client rest.Interface) {
	contents, err := readResponse(client, "/apis")
	if err != nil {
		t.Fatalf("%v", err)
	}
	t.Log(string(contents))
	var apiGroupList metav1.APIGroupList
	err = json.Unmarshal(contents, &apiGroupList)
	if err != nil {
		t.Fatalf("Error in unmarshalling response from server %s: %v", "/apis", err)
	}
	assert.Equal(t, 1, len(apiGroupList.Groups))
	assert.Equal(t, groupVersion.Group, apiGroupList.Groups[0].Name)
	assert.Equal(t, 1, len(apiGroupList.Groups[0].Versions))
	assert.Equal(t, groupVersionForDiscovery, apiGroupList.Groups[0].Versions[0])
	assert.Equal(t, groupVersionForDiscovery, apiGroupList.Groups[0].PreferredVersion)
}

func testAPIGroup(t *testing.T, client rest.Interface) {
	contents, err := readResponse(client, "/apis/wardle.k8s.io")
	if err != nil {
		t.Fatalf("%v", err)
	}
	t.Log(string(contents))
	var apiGroup metav1.APIGroup
	err = json.Unmarshal(contents, &apiGroup)
	if err != nil {
		t.Fatalf("Error in unmarshalling response from server %s: %v", "/apis/wardle.k8s.io", err)
	}
	assert.Equal(t, groupVersion.Group, apiGroup.Name)
	assert.Equal(t, 1, len(apiGroup.Versions))
	assert.Equal(t, groupVersion.String(), apiGroup.Versions[0].GroupVersion)
	assert.Equal(t, groupVersion.Version, apiGroup.Versions[0].Version)
	assert.Equal(t, apiGroup.PreferredVersion, apiGroup.Versions[0])
}

func testAPIResourceList(t *testing.T, client rest.Interface) {
	contents, err := readResponse(client, "/apis/wardle.k8s.io/v1alpha1")
	if err != nil {
		t.Fatalf("%v", err)
	}
	t.Log(string(contents))
	var apiResourceList metav1.APIResourceList
	err = json.Unmarshal(contents, &apiResourceList)
	if err != nil {
		t.Fatalf("Error in unmarshalling response from server %s: %v", "/apis/wardle.k8s.io/v1alpha1", err)
	}
	assert.Equal(t, groupVersion.String(), apiResourceList.GroupVersion)
	assert.Equal(t, 2, len(apiResourceList.APIResources))
	assert.Equal(t, "fischers", apiResourceList.APIResources[0].Name)
	assert.False(t, apiResourceList.APIResources[0].Namespaced)
	assert.Equal(t, "flunders", apiResourceList.APIResources[1].Name)
	assert.True(t, apiResourceList.APIResources[1].Namespaced)
}

const (
	policyCachePollInterval = 100 * time.Millisecond
	policyCachePollTimeout  = 5 * time.Second
)
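// createKubeConfig above turns an in-memory rest.Config into a kubeconfig that
// other servers can consume for delegated authn/authz. The sketch below is an
// editor's illustration of how the test wires that up; writeDelegatedKubeconfig
// is a hypothetical name, not part of the upstream file.
func writeDelegatedKubeconfig(clientCfg *rest.Config, filename string) error {
	// Serialize the admin credentials so the wardle server and the aggregator
	// can authenticate and authorize requests against the kube-apiserver.
	return clientcmd.WriteToFile(*createKubeConfig(clientCfg), filename)
}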
27
vendor/k8s.io/kubernetes/test/integration/examples/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
84
vendor/k8s.io/kubernetes/test/integration/framework/BUILD
generated
vendored
Normal file
@ -0,0 +1,84 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "etcd.go",
        "master_utils.go",
        "perf_utils.go",
        "serializer.go",
        "util.go",
    ],
    data = [
        "@com_coreos_etcd//:etcd",
    ],
    importpath = "k8s.io/kubernetes/test/integration/framework",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/policy/v1beta1:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//pkg/generated/openapi:go_default_library",
        "//pkg/kubectl:go_default_library",
        "//pkg/kubelet/client:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/util/env:go_default_library",
        "//pkg/version:go_default_library",
        "//plugin/pkg/admission/admit:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/go-openapi/spec:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
        "//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
109
vendor/k8s.io/kubernetes/test/integration/framework/etcd.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"hash/adler32"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"os/exec"
	"path/filepath"
	"sync"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/util/env"
)

var (
	etcdSetup sync.Once
	etcdURL   = ""
)

func setupETCD() {
	etcdSetup.Do(func() {
		if os.Getenv("RUNFILES_DIR") == "" {
			etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379")
			return
		}
		etcdPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd")
		// give every test the same random port each run
		etcdPort := 20000 + rand.New(rand.NewSource(int64(adler32.Checksum([]byte(os.Args[0]))))).Intn(5000)
		etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort)

		info, err := os.Stat(etcdPath)
		if err != nil {
			glog.Fatalf("Unable to stat etcd: %v", err)
		}
		if info.IsDir() {
			glog.Fatalf("Did not expect %q to be a directory", etcdPath)
		}

		etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data")
		if err != nil {
			glog.Fatalf("Unable to make temp etcd data dir: %v", err)
		}
		glog.Infof("storing etcd data in: %v", etcdDataDir)

		etcdCmd := exec.Command(
			etcdPath,
			"--data-dir",
			etcdDataDir,
			"--listen-client-urls",
			GetEtcdURL(),
			"--advertise-client-urls",
			GetEtcdURL(),
			"--listen-peer-urls",
			"http://127.0.0.1:0",
		)

		stdout, err := etcdCmd.StdoutPipe()
		if err != nil {
			glog.Fatalf("Failed to run etcd: %v", err)
		}
		stderr, err := etcdCmd.StderrPipe()
		if err != nil {
			glog.Fatalf("Failed to run etcd: %v", err)
		}
		if err := etcdCmd.Start(); err != nil {
			glog.Fatalf("Failed to run etcd: %v", err)
		}

		go io.Copy(os.Stdout, stdout)
		go io.Copy(os.Stderr, stderr)

		go func() {
			if err := etcdCmd.Wait(); err != nil {
				glog.Fatalf("Failed to run etcd: %v", err)
			}
			glog.Fatalf("etcd should not have succeeded")
		}()
	})
}

func EtcdMain(tests func() int) {
	setupETCD()
	os.Exit(tests())
}

// GetEtcdURL returns the URL of the etcd instance started by EtcdMain.
func GetEtcdURL() string {
	return etcdURL
}
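// Usage note (editor's addition, not upstream): GetEtcdURL is what the server
// options in the tests above consume, e.g.
// kubeAPIServerOptions.Etcd.StorageConfig.ServerList. A client could reach the
// same instance directly, roughly (assuming the vendored coreos clientv3 package):
//
//	cli, err := clientv3.New(clientv3.Config{
//		Endpoints: []string{framework.GetEtcdURL()},
//	})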
467
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go
generated
vendored
Normal file
@ -0,0 +1,467 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"net"
	"net/http"
	"net/http/httptest"
	"path"
	goruntime "runtime"
	"strconv"
	"sync"
	"time"

	"github.com/go-openapi/spec"
	"github.com/golang/glog"
	"github.com/pborman/uuid"

	apps "k8s.io/api/apps/v1beta1"
	autoscaling "k8s.io/api/autoscaling/v1"
	certificates "k8s.io/api/certificates/v1beta1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	rbac "k8s.io/api/rbac/v1alpha1"
	storage "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
	authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	authauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/authorization/authorizerfactory"
	authorizerunion "k8s.io/apiserver/pkg/authorization/union"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/options"
	serverstorage "k8s.io/apiserver/pkg/server/storage"
	"k8s.io/apiserver/pkg/storage/storagebackend"
	"k8s.io/client-go/informers"
	extinformers "k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/apis/batch"
	policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
	"k8s.io/kubernetes/pkg/controller"
	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
	"k8s.io/kubernetes/pkg/generated/openapi"
	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
	"k8s.io/kubernetes/pkg/master"
	"k8s.io/kubernetes/pkg/version"
	"k8s.io/kubernetes/plugin/pkg/admission/admit"
)

const (
	// Timeout used in benchmarks, to e.g. scale an rc
	DefaultTimeout = 30 * time.Minute

	// Rc manifest used to create pods for benchmarks.
	// TODO: Convert this to a full path?
	TestRCManifest = "benchmark-controller.json"
)

// MasterComponents is a control struct for all master components started via NewMasterComponents.
// TODO: Include all master components (scheduler, nodecontroller).
// TODO: Reconcile with integration.go, currently the master used there doesn't understand
// how to restart cleanly, which is required for each iteration of a benchmark. The integration
// tests also don't make it easy to isolate and turn off components at will.
type MasterComponents struct {
	// Raw http server in front of the master
	ApiServer *httptest.Server
	// Kubernetes master, contains an embedded etcd storage
	KubeMaster *master.Master
	// Restclient used to talk to the kubernetes master
	ClientSet clientset.Interface
	// Replication controller manager
	ControllerManager *replicationcontroller.ReplicationManager
	// CloseFn shuts down the server
	CloseFn CloseFunc
	// Channel for stop signals to rc manager
	rcStopCh chan struct{}
	// Used to stop master components individually, and via MasterComponents.Stop
	once sync.Once
}

// Config is a struct of configuration directives for NewMasterComponents.
type Config struct {
	// If nil, a default is used; partially filled configs will not get populated.
	MasterConfig            *master.Config
	StartReplicationManager bool
	// Client throttling qps
	QPS float32
	// Client burst qps, also burst replicas allowed in rc manager
	Burst int
	// TODO: Add configs for endpoints controller, scheduler etc
}

// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
	m, s, closeFn := startMasterOrDie(c.MasterConfig, nil, nil)
	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
	glog.Infof("Master %+v", s.URL)
	// TODO: caesarxuchao: remove this client when the refactoring of client library is done.
	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	rcStopCh := make(chan struct{})
	informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
	controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst)

	// TODO: Support events once we can cleanly shutdown an event recorder.
	controllerManager.SetEventRecorder(&record.FakeRecorder{})
	if c.StartReplicationManager {
		informerFactory.Start(rcStopCh)
		go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
	}
	return &MasterComponents{
		ApiServer:         s,
		KubeMaster:        m,
		ClientSet:         clientset,
		ControllerManager: controllerManager,
		CloseFn:           closeFn,
		rcStopCh:          rcStopCh,
	}
}
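// Illustrative usage (editor's sketch, not part of the vendored source): a
// benchmark that needs a master plus a running replication manager could
// construct the components like this; the QPS and Burst values here are
// assumptions, not defaults from this package:
//
//	m := NewMasterComponents(&Config{StartReplicationManager: true, QPS: 100, Burst: 100})
//	defer m.Stop(true, true)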
// alwaysAllow always allows an action
type alwaysAllow struct{}

func (alwaysAllow) Authorize(requestAttributes authauthorizer.Attributes) (authorizer.Decision, string, error) {
	return authorizer.DecisionAllow, "always allow", nil
}

// alwaysEmpty simulates "no authentication" for old tests
func alwaysEmpty(req *http.Request) (user.Info, bool, error) {
	return &user.DefaultInfo{
		Name: "",
	}, true, nil
}

// MasterReceiver can be used to provide the master to a custom incoming server function
type MasterReceiver interface {
	SetMaster(m *master.Master)
}

// MasterHolder implements MasterReceiver and signals on Initialized once the master is set.
type MasterHolder struct {
	Initialized chan struct{}
	M           *master.Master
}

func (h *MasterHolder) SetMaster(m *master.Master) {
	h.M = m
	close(h.Initialized)
}

// startMasterOrDie starts a kubernetes master and an httptest server to handle api requests
func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
	var m *master.Master
	var s *httptest.Server

	if incomingServer != nil {
		s = incomingServer
	} else {
		s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			m.GenericAPIServer.Handler.ServeHTTP(w, req)
		}))
	}

	stopCh := make(chan struct{})
	closeFn := func() {
		close(stopCh)
		s.Close()
	}

	if masterConfig == nil {
		masterConfig = NewMasterConfig()
		masterConfig.GenericConfig.EnableProfiling = true
		masterConfig.GenericConfig.EnableMetrics = true
		masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme)
		masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{
			InfoProps: spec.InfoProps{
				Title:   "Kubernetes",
				Version: "unversioned",
			},
		}
		masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{
			ResponseProps: spec.ResponseProps{
				Description: "Default Response.",
			},
		}
		masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions
		masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
	}

	// set the loopback client config
	if masterConfig.GenericConfig.LoopbackClientConfig == nil {
		masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
	}
	masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL

	privilegedLoopbackToken := uuid.NewRandom().String()
	// wrap any available authorizer
	tokens := make(map[string]*user.DefaultInfo)
	tokens[privilegedLoopbackToken] = &user.DefaultInfo{
		Name:   user.APIServerUser,
		UID:    uuid.NewRandom().String(),
		Groups: []string{user.SystemPrivilegedGroup},
	}

	tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)
	if masterConfig.GenericConfig.Authenticator == nil {
		masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
	} else {
		masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authenticator)
	}

	if masterConfig.GenericConfig.Authorizer != nil {
		tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
		masterConfig.GenericConfig.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorizer)
	} else {
		masterConfig.GenericConfig.Authorizer = alwaysAllow{}
	}

	masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken

	clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig)
	if err != nil {
		glog.Fatal(err)
	}

	sharedInformers := extinformers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
	m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
	if err != nil {
		closeFn()
		glog.Fatalf("error in bringing up the master: %v", err)
	}
	if masterReceiver != nil {
		masterReceiver.SetMaster(m)
	}

	// TODO have this start method actually use the normal start sequence for the API server
	// this method never actually calls the `Run` method for the API server
	// fire the post hooks ourselves
	m.GenericAPIServer.PrepareRun()
	m.GenericAPIServer.RunPostStartHooks(stopCh)

	cfg := *masterConfig.GenericConfig.LoopbackClientConfig
	cfg.ContentConfig.GroupVersion = &schema.GroupVersion{}
	privilegedClient, err := restclient.RESTClientFor(&cfg)
	if err != nil {
		closeFn()
		glog.Fatal(err)
	}
	err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		result := privilegedClient.Get().AbsPath("/healthz").Do()
		status := 0
		result.StatusCode(&status)
		if status == 200 {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		closeFn()
		glog.Fatal(err)
	}

	return m, s, closeFn
}

// NewMasterConfig returns a basic master config.
func NewMasterConfig() *master.Config {
	// This causes the integration tests to exercise the etcd
	// prefix code, so please don't change without ensuring
	// sufficient coverage in other ways.
	etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil))
	etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()}

	info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
	ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)

	resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry)
	// FIXME (soltysh): this GroupVersionResource override should be configurable
	// we need to set both for the whole group and for cronjobs, separately
	resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
	resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: "batch", Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})

	storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)
	storageFactory.SetSerializer(
		schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources},
		"",
		ns)

	genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)
	kubeVersion := version.Get()
	genericConfig.Version = &kubeVersion
	genericConfig.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
	genericConfig.AdmissionControl = admit.NewAlwaysAdmit()
	genericConfig.EnableMetrics = true

	err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)
	if err != nil {
		panic(err)
	}

	return &master.Config{
		GenericConfig: genericConfig,
		ExtraConfig: master.ExtraConfig{
			APIResourceConfigSource: master.DefaultAPIResourceConfigSource(),
			StorageFactory:          storageFactory,
			EnableCoreControllers:   true,
			KubeletClientConfig:     kubeletclient.KubeletClientConfig{Port: 10250},
			APIServerServicePort:    443,
			MasterCount:             1,
		},
	}
}

// NewIntegrationTestMasterConfig returns the master config appropriate for most integration tests.
func NewIntegrationTestMasterConfig() *master.Config {
	masterConfig := NewMasterConfig()
	masterConfig.ExtraConfig.EnableCoreControllers = true
	masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
	masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
	return masterConfig
}

func (m *MasterComponents) stopRCManager() {
	close(m.rcStopCh)
}

// Stop shuts down the api server and/or the rc manager.
func (m *MasterComponents) Stop(apiServer, rcManager bool) {
	glog.Infof("Stopping master components")
	if rcManager {
		// Ordering matters because the apiServer will only shutdown when pending
		// requests are done
		m.once.Do(m.stopRCManager)
	}
	if apiServer {
		m.CloseFn()
	}
}

// CloseFunc can be called to cleanup the master
type CloseFunc func()

// RunAMaster starts a master with the given config, or a default config with
// profiling and metrics enabled when masterConfig is nil.
func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {
	if masterConfig == nil {
		masterConfig = NewMasterConfig()
		masterConfig.GenericConfig.EnableProfiling = true
		masterConfig.GenericConfig.EnableMetrics = true
	}
	return startMasterOrDie(masterConfig, nil, nil)
}

// RunAMasterUsingServer starts a master using the caller-provided server.
func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
	return startMasterOrDie(masterConfig, s, masterReceiver)
}
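// Illustrative usage (editor's sketch, not part of the vendored source): a
// typical integration test starts a default master and pairs it with the
// namespace helpers from util.go in this package; the namespace base name
// "my-test" is an assumption:
//
//	_, s, closeFn := framework.RunAMaster(nil)
//	defer closeFn()
//	ns := framework.CreateTestingNamespace("my-test", s, t)
//	defer framework.DeleteTestingNamespace(ns, s, t)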
// Task is a function passed to worker goroutines by RunParallel.
// The function needs to implement its own thread safety.
type Task func(id int) error

// RunParallel spawns a goroutine per task in the given queue
func RunParallel(task Task, numTasks, numWorkers int) {
	start := time.Now()
	if numWorkers <= 0 {
		numWorkers = numTasks
	}
	defer func() {
		glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
	}()
	var wg sync.WaitGroup
	semCh := make(chan struct{}, numWorkers)
	wg.Add(numTasks)
	for id := 0; id < numTasks; id++ {
		go func(id int) {
			semCh <- struct{}{}
			err := task(id)
			if err != nil {
				glog.Fatalf("Worker failed with %v", err)
			}
			<-semCh
			wg.Done()
		}(id)
	}
	wg.Wait()
	close(semCh)
}
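// Illustrative usage (editor's sketch, not part of the vendored source):
// RunParallel fans numTasks invocations of the Task out over numWorkers
// goroutines; the task body below is hypothetical:
//
//	names := []string{"a", "b", "c"}
//	framework.RunParallel(func(id int) error {
//		glog.Infof("processing %s", names[id])
//		return nil
//	}, len(names), 2)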
// FindFreeLocalPort returns the number of an available port number on
// the loopback interface. Useful for determining the port to launch
// a server on. Error handling required - there is a non-zero chance
// that the returned port number will be bound by another process
// after this function returns.
func FindFreeLocalPort() (int, error) {
	l, err := net.Listen("tcp", ":0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	_, portStr, err := net.SplitHostPort(l.Addr().String())
	if err != nil {
		return 0, err
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return 0, err
	}
	return port, nil
}

// SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.
func SharedEtcd() *storagebackend.Config {
	cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil)
	cfg.ServerList = []string{GetEtcdURL()}
	return cfg
}
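// Illustrative usage (mirrors the tests later in this vendor tree): SharedEtcd
// gives the apiserver test harness a unique etcd prefix so many tests can
// share one etcd process:
//
//	server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
//	defer server.TearDownFn()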
104
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2eframework "k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)

const (
	retries = 5
)

// IntegrationTestNodePreparer creates and prepares nodes for integration performance tests.
type IntegrationTestNodePreparer struct {
	client          clientset.Interface
	countToStrategy []testutils.CountToStrategy
	nodeNamePrefix  string
}

// NewIntegrationTestNodePreparer returns a TestNodePreparer that creates nodes named with the given prefix.
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
	return &IntegrationTestNodePreparer{
		client:          client,
		countToStrategy: countToStrategy,
		nodeNamePrefix:  nodeNamePrefix,
	}
}

// PrepareNodes creates the configured number of nodes and applies each count's strategy to them.
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
	numNodes := 0
	for _, v := range p.countToStrategy {
		numNodes += v.Count
	}

	glog.Infof("Making %d nodes", numNodes)
	baseNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: p.nodeNamePrefix,
		},
		Spec: v1.NodeSpec{
			// TODO: investigate why this is needed.
			ExternalID: "foo",
		},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
				v1.ResourceCPU:    resource.MustParse("4"),
				v1.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: v1.NodeRunning,
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeReady, Status: v1.ConditionTrue},
			},
		},
	}
	for i := 0; i < numNodes; i++ {
		if _, err := p.client.CoreV1().Nodes().Create(baseNode); err != nil {
			glog.Fatalf("Error creating node: %v", err)
		}
	}

	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
	index := 0
	sum := 0
	for _, v := range p.countToStrategy {
		sum += v.Count
		for ; index < sum; index++ {
			if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
				glog.Errorf("Aborting node preparation: %v", err)
				return err
			}
		}
	}
	return nil
}

// CleanupNodes deletes all ready schedulable nodes.
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
	for i := range nodes.Items {
		if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
			glog.Errorf("Error while deleting Node: %v", err)
		}
	}
	return nil
}
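// Illustrative usage (editor's sketch, not part of the vendored source): a
// scheduler performance test can create 100 fake nodes before running; the
// strategy type testutils.TrivialNodePrepareStrategy is an assumption about
// k8s.io/kubernetes/test/utils, as is the name prefix:
//
//	preparer := framework.NewIntegrationTestNodePreparer(
//		client,
//		[]testutils.CountToStrategy{{Count: 100, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
//		"sample-node-",
//	)
//	if err := preparer.PrepareNodes(); err != nil {
//		glog.Fatalf("preparing nodes: %v", err)
//	}
//	defer preparer.CleanupNodes()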
53
vendor/k8s.io/kubernetes/test/integration/framework/serializer.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// NewSingleContentTypeSerializer wraps a serializer in a NegotiatedSerializer that handles one content type
func NewSingleContentTypeSerializer(scheme *runtime.Scheme, info runtime.SerializerInfo) runtime.StorageSerializer {
	return &wrappedSerializer{
		scheme: scheme,
		info:   info,
	}
}

type wrappedSerializer struct {
	scheme *runtime.Scheme
	info   runtime.SerializerInfo
}

var _ runtime.StorageSerializer = &wrappedSerializer{}

func (s *wrappedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
	return []runtime.SerializerInfo{s.info}
}

func (s *wrappedSerializer) UniversalDeserializer() runtime.Decoder {
	return s.info.Serializer
}

func (s *wrappedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
	return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil)
}

func (s *wrappedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
	return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv)
}
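// Illustrative usage (mirrors NewMasterConfig in master_utils.go): pin etcd
// storage to JSON by wrapping the JSON serializer info from the legacy scheme:
//
//	info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
//	ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)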
127
vendor/k8s.io/kubernetes/test/integration/framework/util.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// TODO: This file can potentially be moved to a common place used by both e2e and integration tests.

package framework

import (
	"io/ioutil"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api/testapi"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/kubectl"
)

const (
	// When these values are updated, also update cmd/kubelet/app/options/options.go
	// A copy of these values exists in e2e/framework/util.go.
	currentPodInfraContainerImageName    = "gcr.io/google_containers/pause"
	currentPodInfraContainerImageVersion = "3.0"
)

// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
	arch := ""
	sVer, err := c.Discovery().ServerVersion()
	if err != nil || sVer.Platform == "" {
		// If we failed to get the server version for some reason, default to amd64.
		arch = "amd64"
	} else {
		// Split the platform string into OS and Arch separately.
		// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
		osArchArray := strings.Split(sVer.Platform, "/")
		arch = osArchArray[1]
	}
	return arch
}

// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
	return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}

// CreateTestingNamespace returns a namespace object with the given base name.
func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
	// TODO: Create a namespace with a given basename.
	// Currently we neither create the namespace nor delete all of its contents at the end.
	// But as long as tests are not using the same namespaces, this should work fine.
	return &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			// TODO: Once we start creating namespaces, switch to GenerateName.
			Name: baseName,
		},
	}
}

// DeleteTestingNamespace is currently a no-op counterpart to CreateTestingNamespace.
func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
	// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
}

// RCFromManifest reads a .json file and returns the rc in it.
func RCFromManifest(fileName string) *v1.ReplicationController {
	data, err := ioutil.ReadFile(fileName)
	if err != nil {
		glog.Fatalf("Unexpected error reading rc manifest %v", err)
	}
	var controller v1.ReplicationController
	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
		glog.Fatalf("Unexpected error reading rc manifest %v", err)
	}
	return &controller
}

// StopRC stops the rc via kubectl's stop library
func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
	if err != nil || reaper == nil {
		return err
	}
	err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
	if err != nil {
		return err
	}
	return nil
}

// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
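// Illustrative usage (editor's sketch, not part of the vendored source): scale
// an rc to 5 replicas and later tear it down; the rc name, namespace variable,
// and internal clientset variable are assumptions:
//
//	if _, err := framework.ScaleRC("my-rc", ns, 5, internalClient); err != nil {
//		t.Fatalf("scaling rc: %v", err)
//	}
//	if err := framework.StopRC(rc, internalClient); err != nil {
//		t.Fatalf("stopping rc: %v", err)
//	}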
55
vendor/k8s.io/kubernetes/test/integration/garbagecollector/BUILD
generated
vendored
Normal file
@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "cluster_scoped_owner_test.go",
        "garbage_collector_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/garbagecollector",
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//pkg/controller/garbagecollector:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
147
vendor/k8s.io/kubernetes/test/integration/garbagecollector/cluster_scoped_owner_test.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"io"
	"net/http"
	"strings"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

type roundTripFunc func(req *http.Request) (*http.Response, error)

func (w roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return w(req)
}

type readDelayer struct {
	delay time.Duration
	io.ReadCloser
}

func (b *readDelayer) Read(p []byte) (n int, err error) {
	defer time.Sleep(b.delay)
	return b.ReadCloser.Read(p)
}

func TestClusterScopedOwners(t *testing.T) {
	// Start the test server and wrap the client to delay PV watch responses
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
	server.ClientConfig.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
		return roundTripFunc(func(req *http.Request) (*http.Response, error) {
			if req.URL.Query().Get("watch") != "true" || !strings.Contains(req.URL.String(), "persistentvolumes") {
				return rt.RoundTrip(req)
			}
			resp, err := rt.RoundTrip(req)
			if err != nil {
				return resp, err
			}
			resp.Body = &readDelayer{30 * time.Second, resp.Body}
			return resp, err
		})
	}
	ctx := setupWithServer(t, server, 5)
	defer ctx.tearDown()

	_, clientSet := ctx.gc, ctx.clientSet

	ns := createNamespaceOrDie("gc-cluster-scope-deletion", clientSet, t)
	defer deleteNamespaceOrDie(ns.Name, clientSet, t)

	t.Log("Create a pair of objects")
	pv, err := clientSet.CoreV1().PersistentVolumes().Create(&v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-valid"},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}},
			Capacity:               v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
			AccessModes:            []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "cm-valid",
			OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: pv.Name, UID: pv.UID}},
		},
	}); err != nil {
		t.Fatal(err)
	}

	t.Log("Create a namespaced object with a missing parent")
	if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "cm-missing",
			Labels:          map[string]string{"missing": "true"},
			OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: "missing-name", UID: types.UID("missing-uid")}},
		},
	}); err != nil {
		t.Fatal(err)
	}

	t.Log("Create a namespaced object with a missing type parent")
	if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "cm-invalid",
			OwnerReferences: []metav1.OwnerReference{{Kind: "UnknownType", APIVersion: "unknown.group/v1", Name: "invalid-name", UID: types.UID("invalid-uid")}},
		},
	}); err != nil {
		t.Fatal(err)
	}

	// wait for deletable children to go away
	if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
		_, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-missing", metav1.GetOptions{})
		switch {
		case errors.IsNotFound(err):
			return true, nil
		case err != nil:
			return false, err
		default:
			t.Logf("cm with missing parent still exists, retrying")
			return false, nil
		}
	}); err != nil {
		t.Fatal(err)
	}
	t.Logf("deletable children removed")

	// Give time for blocked children to be incorrectly cleaned up
	time.Sleep(5 * time.Second)

	// ensure children with unverifiable parents don't get reaped
	if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-invalid", metav1.GetOptions{}); err != nil {
		t.Fatalf("child with invalid ownerRef is unexpectedly missing: %v", err)
	}

	// ensure children with present parents don't get reaped
	if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-valid", metav1.GetOptions{}); err != nil {
		t.Fatalf("child with valid ownerRef is unexpectedly missing: %v", err)
	}
}
1004
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
27
vendor/k8s.io/kubernetes/test/integration/garbagecollector/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
66
vendor/k8s.io/kubernetes/test/integration/master/BUILD
generated
vendored
Normal file
@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "crd_test.go",
        "kube_apiserver_test.go",
        "main_test.go",
        "synthetic_master_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/master",
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/master:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/ghodss/yaml:go_default_library",
        "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/networking/v1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/features:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
345
vendor/k8s.io/kubernetes/test/integration/master/crd_test.go
generated
vendored
Normal file
@ -0,0 +1,345 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
	"encoding/json"
	"fmt"
	"testing"
	"time"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	networkingv1 "k8s.io/api/networking/v1"
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

func TestCRDShadowGroup(t *testing.T) {
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
	defer result.TearDownFn()

	kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	apiextensionsclient, err := apiextensionsclientset.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	t.Logf("Creating a NetworkPolicy")
	nwPolicy, err := kubeclient.NetworkingV1().NetworkPolicies("default").Create(&networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
			Ingress:     []networkingv1.NetworkPolicyIngressRule{},
		},
	})
	if err != nil {
		t.Fatalf("Failed to create NetworkPolicy: %v", err)
	}

	t.Logf("Trying to shadow networking group")
	crd := &apiextensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foos." + networkingv1.GroupName,
		},
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group:   networkingv1.GroupName,
			Version: networkingv1.SchemeGroupVersion.Version,
			Scope:   apiextensionsv1beta1.ClusterScoped,
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural: "foos",
				Kind:   "Foo",
			},
		},
	}
	if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
		t.Fatalf("Failed to create networking group CRD: %v", err)
	}
	if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil {
		t.Fatalf("Failed to establish networking group CRD: %v", err)
	}
	// wait to give aggregator time to update
	time.Sleep(2 * time.Second)

	t.Logf("Checking that we still see the NetworkPolicy")
	_, err = kubeclient.NetworkingV1().NetworkPolicies(nwPolicy.Namespace).Get(nwPolicy.Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Failed to get NetworkPolicy: %v", err)
	}

	t.Logf("Checking that crd resource does not show up in networking group")
	found, err := crdExistsInDiscovery(apiextensionsclient, crd)
	if err != nil {
		t.Fatalf("unexpected discovery error: %v", err)
	}
	if found {
		t.Errorf("CRD resource shows up in discovery, but shouldn't.")
	}
}

func TestCRD(t *testing.T) {
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.Initializers, true)()

	result := kubeapiservertesting.StartTestServerOrDie(t, []string{"--admission-control", "Initializers"}, framework.SharedEtcd())
	defer result.TearDownFn()

	kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	apiextensionsclient, err := apiextensionsclientset.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	t.Logf("Trying to create a custom resource without conflict")
	crd := &apiextensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foos.cr.bar.com",
		},
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group:   "cr.bar.com",
			Version: "v1",
			Scope:   apiextensionsv1beta1.NamespaceScoped,
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural: "foos",
				Kind:   "Foo",
			},
		},
	}
	if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
		t.Fatalf("Failed to create foos.cr.bar.com CRD: %v", err)
	}
	if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil {
		t.Fatalf("Failed to establish foos.cr.bar.com CRD: %v", err)
	}
	if err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) {
		return crdExistsInDiscovery(apiextensionsclient, crd)
	}); err != nil {
		t.Fatalf("Failed to see foos.cr.bar.com in discovery: %v", err)
	}

	t.Logf("Trying to access foos.cr.bar.com with dynamic client")
	barComConfig := *result.ClientConfig
	barComConfig.GroupVersion = &schema.GroupVersion{Group: "cr.bar.com", Version: "v1"}
	barComConfig.APIPath = "/apis"
	barComClient, err := dynamic.NewClient(&barComConfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").List(metav1.ListOptions{})
	if err != nil {
		t.Errorf("Failed to list foos.cr.bar.com instances: %v", err)
	}

	t.Logf("Creating InitializerConfiguration")
	_, err = kubeclient.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&admissionregistrationv1alpha1.InitializerConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foos.cr.bar.com",
		},
		Initializers: []admissionregistrationv1alpha1.Initializer{
			{
				Name: "cr.bar.com",
				Rules: []admissionregistrationv1alpha1.Rule{
					{
						APIGroups:   []string{"cr.bar.com"},
						APIVersions: []string{"*"},
						Resources:   []string{"*"},
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("Failed to create InitializerConfiguration: %v", err)
	}

	// TODO DO NOT MERGE THIS
	time.Sleep(5 * time.Second)

	t.Logf("Creating Foo instance")
	foo := &Foo{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "cr.bar.com/v1",
			Kind:       "Foo",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "foo"},
	}
	unstructuredFoo, err := unstructuredFoo(foo)
	if err != nil {
		t.Fatalf("Unable to create Foo: %v", err)
	}
	createErr := make(chan error, 1)
	go func() {
		_, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Create(unstructuredFoo)
		t.Logf("Foo instance create returned: %v", err)
		if err != nil {
			createErr <- err
		}
	}()

	err = wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		select {
		case createErr := <-createErr:
			return true, createErr
		default:
		}

		t.Logf("Checking that Foo instance is visible with IncludeUninitialized=true")
		_, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Get(foo.ObjectMeta.Name, metav1.GetOptions{
			IncludeUninitialized: true,
		})
		switch {
		case err == nil:
			return true, nil
		case errors.IsNotFound(err):
			return false, nil
		default:
			return false, err
		}
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	t.Logf("Removing initializer from Foo instance")
	success := false
	for i := 0; i < 10; i++ {
		// would love to replace the following with a patch, but removing strings from the initializer array
		// is not what JSON (Merge) patch authors had in mind.
		fooUnstructured, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Get(foo.ObjectMeta.Name, metav1.GetOptions{
			IncludeUninitialized: true,
		})
		if err != nil {
			t.Fatalf("Error getting Foo instance: %v", err)
		}
		bs, _ := fooUnstructured.MarshalJSON()
		t.Logf("Got Foo instance: %v", string(bs))
		foo := Foo{}
		if err := json.Unmarshal(bs, &foo); err != nil {
			t.Fatalf("Error parsing Foo instance: %v", err)
		}

		// remove the initializer
		if foo.ObjectMeta.Initializers == nil {
			t.Fatalf("Expected initializers to be set in Foo instance")
		}
		found := false
		for i := range foo.ObjectMeta.Initializers.Pending {
			if foo.ObjectMeta.Initializers.Pending[i].Name == "cr.bar.com" {
				foo.ObjectMeta.Initializers.Pending = append(foo.ObjectMeta.Initializers.Pending[:i], foo.ObjectMeta.Initializers.Pending[i+1:]...)
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("Expected cr.bar.com as initializer on Foo instance")
		}
		if len(foo.ObjectMeta.Initializers.Pending) == 0 && foo.ObjectMeta.Initializers.Result == nil {
			foo.ObjectMeta.Initializers = nil
		}
		bs, err = json.Marshal(&foo)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		fooUnstructured.UnmarshalJSON(bs)

		_, err = barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Update(fooUnstructured)
		if err != nil && !errors.IsConflict(err) {
			t.Fatalf("Failed to update Foo instance: %v", err)
		} else if err == nil {
			success = true
			break
		}
	}
	if !success {
		t.Fatalf("Failed to remove initializer from Foo object")
	}

	t.Logf("Checking that Foo instance is visible after removing the initializer")
	if _, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Get(foo.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
}

type Foo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
}

func unstructuredFoo(foo *Foo) (*unstructured.Unstructured, error) {
	bs, err := json.Marshal(foo)
	if err != nil {
		return nil, err
	}
	ret := &unstructured.Unstructured{}
	if err = ret.UnmarshalJSON(bs); err != nil {
		return nil, err
	}
	return ret, nil
}

func waitForEstablishedCRD(client apiextensionsclientset.Interface, name string) error {
	return wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range crd.Status.Conditions {
			switch cond.Type {
			case apiextensionsv1beta1.Established:
				if cond.Status == apiextensionsv1beta1.ConditionTrue {
					return true, err
				}
			case apiextensionsv1beta1.NamesAccepted:
				if cond.Status == apiextensionsv1beta1.ConditionFalse {
					fmt.Printf("Name conflict: %v\n", cond.Reason)
				}
			}
		}
		return false, nil
	})
}

func crdExistsInDiscovery(client apiextensionsclientset.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) (bool, error) {
	resourceList, err := client.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + crd.Spec.Version)
	if err != nil {
		return false, nil
	}
	for _, resource := range resourceList.APIResources {
		if resource.Name == crd.Spec.Names.Plural {
			return true, nil
		}
	}
	return false, nil
}
74
vendor/k8s.io/kubernetes/test/integration/master/kube_apiserver_test.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
	"testing"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

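// TestRun starts a full kube-apiserver against a shared etcd and verifies
// that a write (creating a Deployment) succeeds immediately after /healthz
// has reported the server healthy.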
func TestRun(t *testing.T) {
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
	defer result.TearDownFn()

	client, err := kubernetes.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// test whether the server is really healthy after /healthz told us so
	t.Logf("Creating Deployment directly after being healthy")
	var replicas int32 = 1
	_, err = client.AppsV1beta1().Deployments("default").Create(&appsv1beta1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "test",
		},
		Spec: appsv1beta1.DeploymentSpec{
			Replicas: &replicas,
			Strategy: appsv1beta1.DeploymentStrategy{
				Type: appsv1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"foo": "bar"},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "foo",
							Image: "foo",
						},
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("Failed to create deployment: %v", err)
	}
}
27
vendor/k8s.io/kubernetes/test/integration/master/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
768
vendor/k8s.io/kubernetes/test/integration/master/synthetic_master_test.go
generated
vendored
Normal file
@ -0,0 +1,768 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/ghodss/yaml"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/group"
	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/authorization/authorizerfactory"
	"k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest"
	clienttypedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	api "k8s.io/kubernetes/pkg/apis/core"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/master"
	"k8s.io/kubernetes/test/integration"
	"k8s.io/kubernetes/test/integration/framework"
)

const (
	AliceToken string = "abc123" // username: alice. Present in token file.
	BobToken   string = "xyz987" // username: bob. Present in token file.
)

type allowAliceAuthorizer struct{}

func (allowAliceAuthorizer) Authorize(a authorizer.Attributes) (authorizer.Decision, string, error) {
	if a.GetUser() != nil && a.GetUser().GetName() == "alice" {
		return authorizer.DecisionAllow, "", nil
	}
	return authorizer.DecisionNoOpinion, "I can't allow that. Go ask alice.", nil
}

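// testPrefix asserts that a GET on the given API group prefix returns
// 200 OK from a freshly started master.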
func testPrefix(t *testing.T, prefix string) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	resp, err := http.Get(s.URL + prefix)
	if err != nil {
		t.Fatalf("unexpected error getting %s prefix: %v", prefix, err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
	}
}

func TestAutoscalingPrefix(t *testing.T) {
	testPrefix(t, "/apis/autoscaling/")
}

func TestBatchPrefix(t *testing.T) {
	testPrefix(t, "/apis/batch/")
}

func TestAppsPrefix(t *testing.T) {
	testPrefix(t, "/apis/apps/")
}

func TestExtensionsPrefix(t *testing.T) {
	testPrefix(t, "/apis/extensions/")
}

func TestKubernetesService(t *testing.T) {
	config := framework.NewMasterConfig()
	_, _, closeFn := framework.RunAMaster(config)
	defer closeFn()
	coreClient := clientset.NewForConfigOrDie(config.GenericConfig.LoopbackClientConfig)
	if _, err := coreClient.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil {
		t.Fatalf("Expected kubernetes service to exist, got: %v", err)
	}
}

func TestEmptyList(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	u := s.URL + "/api/v1/namespaces/default/pods"
	resp, err := http.Get(u)
	if err != nil {
		t.Fatalf("unexpected error getting %s: %v", u, err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	decodedData := map[string]interface{}{}
	if err := json.Unmarshal(data, &decodedData); err != nil {
		t.Logf("body: %s", string(data))
		t.Fatalf("got error decoding data: %v", err)
	}
	if items, ok := decodedData["items"]; !ok {
		t.Logf("body: %s", string(data))
		t.Fatalf("missing items field in empty list (all lists should return an items field)")
	} else if items == nil {
		t.Logf("body: %s", string(data))
		t.Fatalf("nil items field from empty list (all lists should return non-nil empty items lists)")
	}
}

func initStatusForbiddenMasterConfig() *master.Config {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer()
	return masterConfig
}

func initUnauthorizedMasterConfig() *master.Config {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	tokenAuthenticator := tokentest.New()
	tokenAuthenticator.Tokens[AliceToken] = &user.DefaultInfo{Name: "alice", UID: "1"}
	tokenAuthenticator.Tokens[BobToken] = &user.DefaultInfo{Name: "bob", UID: "2"}
	masterConfig.GenericConfig.Authenticator = group.NewGroupAdder(bearertoken.New(tokenAuthenticator), []string{user.AllAuthenticated})
	masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{}
	return masterConfig
}

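// TestStatus exercises the Status object returned for 404, 403 and 401
// responses, running masters wired with different authenticators and
// authorizers for each case.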
func TestStatus(t *testing.T) {
	testCases := []struct {
		name         string
		masterConfig *master.Config
		statusCode   int
		reqPath      string
		reason       string
		message      string
	}{
		{
			name:         "404",
			masterConfig: nil,
			statusCode:   http.StatusNotFound,
			reqPath:      "/apis/batch/v1/namespaces/default/jobs/foo",
			reason:       "NotFound",
			message:      `jobs.batch "foo" not found`,
		},
		{
			name:         "403",
			masterConfig: initStatusForbiddenMasterConfig(),
			statusCode:   http.StatusForbidden,
			reqPath:      "/apis",
			reason:       "Forbidden",
			message:      `forbidden: User "" cannot get path "/apis": Everything is forbidden.`,
		},
		{
			name:         "401",
			masterConfig: initUnauthorizedMasterConfig(),
			statusCode:   http.StatusUnauthorized,
			reqPath:      "/apis",
			reason:       "Unauthorized",
			message:      `Unauthorized`,
		},
	}

	for _, tc := range testCases {
		_, s, closeFn := framework.RunAMaster(tc.masterConfig)
		defer closeFn()

		u := s.URL + tc.reqPath
		resp, err := http.Get(u)
		if err != nil {
			t.Fatalf("unexpected error getting %s: %v", u, err)
		}
		if resp.StatusCode != tc.statusCode {
			t.Fatalf("got status %v instead of %s", resp.StatusCode, tc.name)
		}
		defer resp.Body.Close()
		data, _ := ioutil.ReadAll(resp.Body)
		decodedData := map[string]interface{}{}
		if err := json.Unmarshal(data, &decodedData); err != nil {
			t.Logf("body: %s", string(data))
			t.Fatalf("got error decoding data: %v", err)
		}
		t.Logf("body: %s", string(data))

		if got, expected := decodedData["apiVersion"], "v1"; got != expected {
			t.Errorf("unexpected apiVersion %q, expected %q", got, expected)
		}
		if got, expected := decodedData["kind"], "Status"; got != expected {
			t.Errorf("unexpected kind %q, expected %q", got, expected)
		}
		if got, expected := decodedData["status"], "Failure"; got != expected {
			t.Errorf("unexpected status %q, expected %q", got, expected)
		}
		if got, expected := decodedData["code"], float64(tc.statusCode); got != expected {
			t.Errorf("unexpected code %v, expected %v", got, expected)
		}
		if got, expected := decodedData["reason"], tc.reason; got != expected {
			t.Errorf("unexpected reason %v, expected %v", got, expected)
		}
		if got, expected := decodedData["message"], tc.message; got != expected {
			t.Errorf("unexpected message %v, expected %v", got, expected)
		}
	}
}

func TestWatchSucceedsWithoutArgs(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	resp, err := http.Get(s.URL + "/api/v1/namespaces?watch=1")
	if err != nil {
		t.Fatalf("unexpected error getting experimental prefix: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
	}
	resp.Body.Close()
}

var hpaV1 string = `
{
  "apiVersion": "autoscaling/v1",
  "kind": "HorizontalPodAutoscaler",
  "metadata": {
    "name": "test-hpa",
    "namespace": "default"
  },
  "spec": {
    "scaleTargetRef": {
      "kind": "ReplicationController",
      "name": "test-hpa",
      "namespace": "default"
    },
    "minReplicas": 1,
    "maxReplicas": 10,
    "targetCPUUtilizationPercentage": 50
  }
}
`

var deploymentExtensions string = `
{
  "apiVersion": "extensions/v1beta1",
  "kind": "Deployment",
  "metadata": {
    "name": "test-deployment1",
    "namespace": "default"
  },
  "spec": {
    "replicas": 1,
    "template": {
      "metadata": {
        "labels": {
          "app": "nginx0"
        }
      },
      "spec": {
        "containers": [{
          "name": "nginx",
          "image": "gcr.io/google-containers/nginx:1.7.9"
        }]
      }
    }
  }
}
`

var deploymentApps string = `
{
  "apiVersion": "apps/v1beta1",
  "kind": "Deployment",
  "metadata": {
    "name": "test-deployment2",
    "namespace": "default"
  },
  "spec": {
    "replicas": 1,
    "template": {
      "metadata": {
        "labels": {
          "app": "nginx0"
        }
      },
      "spec": {
        "containers": [{
          "name": "nginx",
          "image": "gcr.io/google-containers/nginx:1.7.9"
        }]
      }
    }
  }
}
`

func autoscalingPath(resource, namespace, name string) string {
	return testapi.Autoscaling.ResourcePath(resource, namespace, name)
}

func batchPath(resource, namespace, name string) string {
	return testapi.Batch.ResourcePath(resource, namespace, name)
}

func extensionsPath(resource, namespace, name string) string {
	return testapi.Extensions.ResourcePath(resource, namespace, name)
}

func appsPath(resource, namespace, name string) string {
	return testapi.Apps.ResourcePath(resource, namespace, name)
}

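// TestAutoscalingGroupBackwardCompatibility creates an HPA through the
// autoscaling group endpoint and reads it back, checking both the status
// codes and the apiVersion stamped on the response body.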
func TestAutoscalingGroupBackwardCompatibility(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()
	transport := http.DefaultTransport

	requests := []struct {
		verb                string
		URL                 string
		body                string
		expectedStatusCodes map[int]bool
		expectedVersion     string
	}{
		{"POST", autoscalingPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), hpaV1, integration.Code201, ""},
		{"GET", autoscalingPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()},
	}

	for _, r := range requests {
		bodyBytes := bytes.NewReader([]byte(r.body))
		req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes)
		if err != nil {
			t.Logf("case %v", r)
			t.Fatalf("unexpected error: %v", err)
		}
		func() {
			resp, err := transport.RoundTrip(req)
			if err != nil {
				t.Logf("case %v", r)
				t.Fatalf("unexpected error: %v", err)
			}
			defer resp.Body.Close()
			b, _ := ioutil.ReadAll(resp.Body)
			body := string(b)
			if _, ok := r.expectedStatusCodes[resp.StatusCode]; !ok {
				t.Logf("case %v", r)
				t.Errorf("Expected status one of %v, but got %v", r.expectedStatusCodes, resp.StatusCode)
				t.Errorf("Body: %v", body)
			}
			if !strings.Contains(body, "\"apiVersion\":\""+r.expectedVersion) {
				t.Logf("case %v", r)
				t.Errorf("Expected version %v, got body %v", r.expectedVersion, body)
			}
		}()
	}
}

func TestAppsGroupBackwardCompatibility(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()
	transport := http.DefaultTransport

	requests := []struct {
		verb                string
		URL                 string
		body                string
		expectedStatusCodes map[int]bool
		expectedVersion     string
	}{
		// Post to extensions endpoint and get back from both: extensions and apps
		{"POST", extensionsPath("deployments", metav1.NamespaceDefault, ""), deploymentExtensions, integration.Code201, ""},
		{"GET", extensionsPath("deployments", metav1.NamespaceDefault, "test-deployment1"), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
		{"GET", appsPath("deployments", metav1.NamespaceDefault, "test-deployment1"), "", integration.Code200, testapi.Apps.GroupVersion().String()},
		{"DELETE", extensionsPath("deployments", metav1.NamespaceDefault, "test-deployment1"), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
		// Post to apps endpoint and get back from both: apps and extensions
		{"POST", appsPath("deployments", metav1.NamespaceDefault, ""), deploymentApps, integration.Code201, ""},
		{"GET", appsPath("deployments", metav1.NamespaceDefault, "test-deployment2"), "", integration.Code200, testapi.Apps.GroupVersion().String()},
		{"GET", extensionsPath("deployments", metav1.NamespaceDefault, "test-deployment2"), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
		{"DELETE", appsPath("deployments", metav1.NamespaceDefault, "test-deployment2"), "", integration.Code200, testapi.Apps.GroupVersion().String()},
	}

	for _, r := range requests {
		bodyBytes := bytes.NewReader([]byte(r.body))
		req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes)
		if err != nil {
			t.Logf("case %v", r)
			t.Fatalf("unexpected error: %v", err)
		}
		func() {
			resp, err := transport.RoundTrip(req)
			if err != nil {
				t.Logf("case %v", r)
				t.Fatalf("unexpected error: %v", err)
			}
			defer resp.Body.Close()
			b, _ := ioutil.ReadAll(resp.Body)
			body := string(b)
			if _, ok := r.expectedStatusCodes[resp.StatusCode]; !ok {
				t.Logf("case %v", r)
				t.Errorf("Expected status one of %v, but got %v", r.expectedStatusCodes, resp.StatusCode)
				t.Errorf("Body: %v", body)
			}
			if !strings.Contains(body, "\"apiVersion\":\""+r.expectedVersion) {
				t.Logf("case %v", r)
				t.Errorf("Expected version %v, got body %v", r.expectedVersion, body)
			}
		}()
	}
}

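// TestAccept verifies content negotiation on /api/: JSON by default, YAML on
// request, the first supported match for a multi-valued Accept header, and
// 406 Not Acceptable for an unknown media type.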
func TestAccept(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	resp, err := http.Get(s.URL + "/api/")
	if err != nil {
		t.Fatalf("unexpected error getting api: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
	}

	body, _ := ioutil.ReadAll(resp.Body)
	if resp.Header.Get("Content-Type") != "application/json" {
		t.Errorf("unexpected content: %s", body)
	}
	if err := json.Unmarshal(body, &map[string]interface{}{}); err != nil {
		t.Fatal(err)
	}

	req, err := http.NewRequest("GET", s.URL+"/api/", nil)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Accept", "application/yaml")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	body, _ = ioutil.ReadAll(resp.Body)
	if resp.Header.Get("Content-Type") != "application/yaml" {
		t.Errorf("unexpected content: %s", body)
	}
	t.Logf("body: %s", body)
	if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
		t.Fatal(err)
	}

	req, err = http.NewRequest("GET", s.URL+"/api/", nil)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Accept", "application/json, application/yaml")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	body, _ = ioutil.ReadAll(resp.Body)
	if resp.Header.Get("Content-Type") != "application/json" {
		t.Errorf("unexpected content: %s", body)
	}
	t.Logf("body: %s", body)
	if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
		t.Fatal(err)
	}

	req, err = http.NewRequest("GET", s.URL+"/api/", nil)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Accept", "application") // not a valid media type
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusNotAcceptable {
		t.Errorf("unexpected error from the server")
	}
}

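// countEndpoints returns the total number of address/port pairs across all
// subsets of the endpoints object.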
func countEndpoints(eps *api.Endpoints) int {
	count := 0
	for i := range eps.Subsets {
		count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
	}
	return count
}

func TestMasterService(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
	defer closeFn()

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})

	err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
		svcList, err := client.Core().Services(metav1.NamespaceDefault).List(metav1.ListOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false, nil
		}
		found := false
		for i := range svcList.Items {
			if svcList.Items[i].Name == "kubernetes" {
				found = true
				break
			}
		}
		if found {
			ep, err := client.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
			if err != nil {
				return false, nil
			}
			if countEndpoints(ep) == 0 {
				return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
			}
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

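// TestServiceAlloc starts a master with a tiny /29 service CIDR, fills the
// allocatable cluster IPs with services, and verifies that freeing one IP
// makes room for a new Service.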
func TestServiceAlloc(t *testing.T) {
	cfg := framework.NewIntegrationTestMasterConfig()
	_, cidr, err := net.ParseCIDR("192.168.0.0/29")
	if err != nil {
		t.Fatalf("bad cidr: %v", err)
	}
	cfg.ExtraConfig.ServiceIPRange = *cidr
	_, s, closeFn := framework.RunAMaster(cfg)
	defer closeFn()

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})

	svc := func(i int) *api.Service {
		return &api.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name: fmt.Sprintf("svc-%v", i),
			},
			Spec: api.ServiceSpec{
				Type: api.ServiceTypeClusterIP,
				Ports: []api.ServicePort{
					{Port: 80},
				},
			},
		}
	}

	// Wait until the default "kubernetes" service is created.
	if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
		_, err := client.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
		if err != nil && !errors.IsNotFound(err) {
			return false, err
		}
		return !errors.IsNotFound(err), nil
	}); err != nil {
		t.Fatalf("creating kubernetes service timed out")
	}

	// make 5 more services to take up all IPs
	for i := 0; i < 5; i++ {
		if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(i)); err != nil {
			t.Error(err)
		}
	}

	// Make another service. It will fail because we're out of cluster IPs
	if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil {
		if !strings.Contains(err.Error(), "range is full") {
			t.Errorf("unexpected error text: %v", err)
		}
	} else {
		svcs, err := client.Core().Services(metav1.NamespaceAll).List(metav1.ListOptions{})
		if err != nil {
			t.Fatalf("unexpected success, and error getting the services: %v", err)
		}
		allIPs := []string{}
		for _, s := range svcs.Items {
			allIPs = append(allIPs, s.Spec.ClusterIP)
		}
		t.Fatalf("unexpected creation success. The following IPs exist: %#v. It should only be possible to allocate 2 IP addresses in this cluster.\n\n%#v", allIPs, svcs)
	}

	// Delete the first service.
	if err := client.Core().Services(metav1.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}

	// This time creating the second service should work.
	if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}
}

// TestUpdateNodeObjects represents a simple version of the behavior of node checkins at steady
// state. This test allows for easy profiling of a realistic master scenario for baseline CPU
// in very large clusters. It is disabled by default - start a kube-apiserver and pass
// UPDATE_NODE_APISERVER as the host value.
func TestUpdateNodeObjects(t *testing.T) {
	server := os.Getenv("UPDATE_NODE_APISERVER")
	if len(server) == 0 {
		t.Skip("UPDATE_NODE_APISERVER is not set")
	}
	c := clienttypedv1.NewForConfigOrDie(&restclient.Config{
		QPS:  10000,
		Host: server,
		ContentConfig: restclient.ContentConfig{
			AcceptContentTypes: "application/vnd.kubernetes.protobuf",
			ContentType:        "application/vnd.kubernetes.protobuf",
		},
	})

	nodes := 400
	listers := 5
	watchers := 50
	iterations := 10000

	for i := 0; i < nodes*6; i++ {
		c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil)
		_, err := c.Nodes().Create(&v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: fmt.Sprintf("node-%d", i),
			},
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	for k := 0; k < listers; k++ {
		go func(lister int) {
			for i := 0; i < iterations; i++ {
				_, err := c.Nodes().List(metav1.ListOptions{})
				if err != nil {
					fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err)
					break
				}
				time.Sleep(time.Duration(lister)*10*time.Millisecond + 1500*time.Millisecond)
			}
		}(k)
	}

	for k := 0; k < watchers; k++ {
		go func(lister int) {
			w, err := c.Nodes().Watch(metav1.ListOptions{})
			if err != nil {
				fmt.Printf("[watch:%d] error: %v\n", lister, err)
				return
			}
			i := 0
			for r := range w.ResultChan() {
				i++
				if _, ok := r.Object.(*v1.Node); !ok {
					fmt.Printf("[watch:%d] unexpected object after %d: %#v\n", lister, i, r)
				}
				if i%100 == 0 {
					fmt.Printf("[watch:%d] iteration %d ...\n", lister, i)
				}
			}
			fmt.Printf("[watch:%d] done\n", lister)
		}(k)
	}

	var wg sync.WaitGroup
	wg.Add(nodes)

	for j := 0; j < nodes; j++ {
		go func(node int) {
			var lastCount int
			for i := 0; i < iterations; i++ {
				if i%100 == 0 {
					fmt.Printf("[%d] iteration %d ...\n", node, i)
				}
				if i%20 == 0 {
					_, err := c.Nodes().List(metav1.ListOptions{})
					if err != nil {
						fmt.Printf("[%d] error after %d: %v\n", node, i, err)
						break
					}
				}

				r, err := c.Nodes().List(metav1.ListOptions{
					FieldSelector:   fmt.Sprintf("metadata.name=node-%d", node),
					ResourceVersion: "0",
				})
				if err != nil {
					fmt.Printf("[%d] error after %d: %v\n", node, i, err)
					break
				}
				if len(r.Items) != 1 {
					fmt.Printf("[%d] error after %d: unexpected list count\n", node, i)
					break
				}

				n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{})
				if err != nil {
					fmt.Printf("[%d] error after %d: %v\n", node, i, err)
					break
				}
				if len(n.Status.Conditions) != lastCount {
					fmt.Printf("[%d] worker set %d, read %d conditions\n", node, lastCount, len(n.Status.Conditions))
					break
				}
				previousCount := lastCount
				switch {
				case i%4 == 0:
					lastCount = 1
					n.Status.Conditions = []v1.NodeCondition{
						{
							Type:   v1.NodeReady,
							Status: v1.ConditionTrue,
							Reason: "foo",
						},
					}
				case i%4 == 1:
					lastCount = 2
					n.Status.Conditions = []v1.NodeCondition{
						{
							Type:   v1.NodeReady,
							Status: v1.ConditionFalse,
							Reason: "foo",
						},
						{
							Type:   v1.NodeDiskPressure,
							Status: v1.ConditionTrue,
							Reason: "bar",
						},
					}
				case i%4 == 2:
					lastCount = 0
					n.Status.Conditions = nil
				}
				if _, err := c.Nodes().UpdateStatus(n); err != nil {
					if !errors.IsConflict(err) {
						fmt.Printf("[%d] error after %d: %v\n", node, i, err)
						break
					}
					lastCount = previousCount
				}
			}
			wg.Done()
			fmt.Printf("[%d] done\n", node)
		}(j)
	}
	wg.Wait()
}
49
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
Normal file
@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["doc.go"],
    importpath = "k8s.io/kubernetes/test/integration/metrics",
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "metrics_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/metrics",
    library = ":go_default_library",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/prometheus/client_model/go:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)
17
vendor/k8s.io/kubernetes/test/integration/metrics/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics
27
vendor/k8s.io/kubernetes/test/integration/metrics/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
127
vendor/k8s.io/kubernetes/test/integration/metrics/metrics_test.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"bufio"
	"fmt"
	"net/http"
	"net/http/httptest"
	"runtime"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/test/integration/framework"

	"github.com/golang/glog"
	"github.com/golang/protobuf/proto"
	prometheuspb "github.com/prometheus/client_model/go"
)

const scrapeRequestHeader = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text"

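// scrapeMetrics fetches /metrics from the test server in the Prometheus text
// protobuf format and parses each line into a MetricFamily.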
func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) {
	req, err := http.NewRequest("GET", s.URL+"/metrics", nil)
	if err != nil {
		return nil, fmt.Errorf("Unable to create http request: %v", err)
	}
	// Ask the prometheus exporter for its text protocol buffer format, since it's
	// much easier to parse than its plain-text format. Don't use the serialized
	// proto representation since it uses a non-standard varint delimiter between
	// metric families.
	req.Header.Add("Accept", scrapeRequestHeader)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Unable to contact metrics endpoint of master: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("Non-200 response trying to scrape metrics from master: %v", resp)
	}

	// Each line in the response body should contain all the data for a single metric.
	var metrics []*prometheuspb.MetricFamily
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var metric prometheuspb.MetricFamily
		if err := proto.UnmarshalText(scanner.Text(), &metric); err != nil {
			return nil, fmt.Errorf("Failed to unmarshal line of metrics response: %v", err)
		}
		glog.V(4).Infof("Got metric %q", metric.GetName())
		metrics = append(metrics, &metric)
	}
	return metrics, nil
}

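// checkForExpectedMetrics fails the test if any of the expected metric names
// is missing from the scraped metric families.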
func checkForExpectedMetrics(t *testing.T, metrics []*prometheuspb.MetricFamily, expectedMetrics []string) {
	foundMetrics := make(map[string]bool)
	for _, metric := range metrics {
		foundMetrics[metric.GetName()] = true
	}
	for _, expected := range expectedMetrics {
		if _, found := foundMetrics[expected]; !found {
			t.Errorf("Master metrics did not include expected metric %q", expected)
		}
	}
}

func TestMasterProcessMetrics(t *testing.T) {
	if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
		t.Skipf("not supported on GOOS=%s", runtime.GOOS)
	}

	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	checkForExpectedMetrics(t, metrics, []string{
		"process_start_time_seconds",
		"process_cpu_seconds_total",
		"process_open_fds",
		"process_resident_memory_bytes",
	})
}

func TestApiserverMetrics(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	// Make a request to the apiserver to ensure there's at least one data point
	// for the metrics we're expecting -- otherwise, they won't be exported.
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
	if _, err := client.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil {
		t.Fatalf("unexpected error getting pods: %v", err)
	}

	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	checkForExpectedMetrics(t, metrics, []string{
		"apiserver_request_count",
		"apiserver_request_latencies",
	})
}
39
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "objectmeta_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/objectmeta",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/objectmeta/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package objectmeta

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
53
vendor/k8s.io/kubernetes/test/integration/objectmeta/objectmeta_test.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package objectmeta

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/test/integration/framework"
)

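// TestIgnoreClusterName verifies that the API server silently drops
// ObjectMeta.ClusterName on both create and update.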
func TestIgnoreClusterName(t *testing.T) {
	config := framework.NewMasterConfig()
	_, s, closeFn := framework.RunAMaster(config)
	defer closeFn()

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
	ns := v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-namespace",
			ClusterName: "cluster-name-to-ignore",
		},
	}
	nsNew, err := client.Core().Namespaces().Create(&ns)
	assert.Nil(t, err)
	assert.Equal(t, ns.Name, nsNew.Name)
	assert.Empty(t, nsNew.ClusterName)

	nsNew, err = client.Core().Namespaces().Update(&ns)
	assert.Nil(t, err)
	assert.Equal(t, ns.Name, nsNew.Name)
	assert.Empty(t, nsNew.ClusterName)
}
35
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD
generated
vendored
Normal file
@ -0,0 +1,35 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "openshift_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/openshift",
    tags = ["integration"],
    deps = [
        "//pkg/master:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/openshift/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package openshift

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
42
vendor/k8s.io/kubernetes/test/integration/openshift/openshift_test.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package openshift

import (
	"testing"

	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/kubernetes/pkg/master"
)

// This test references methods that OpenShift uses to customize the master on
// startup and that are not referenced directly by a master.
func TestMasterExportsSymbols(t *testing.T) {
	_ = &master.Config{
		GenericConfig: &genericapiserver.Config{
			EnableMetrics: true,
		},
		ExtraConfig: master.ExtraConfig{
			EnableCoreControllers: false,
			EnableUISupport:       false,
			EnableLogsSupport:     false,
		},
	}
	_ = &master.Master{
		GenericAPIServer: &genericapiserver.GenericAPIServer{},
	}
}
39
vendor/k8s.io/kubernetes/test/integration/pods/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "pods_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/pods",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/pods/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pods

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
181
vendor/k8s.io/kubernetes/test/integration/pods/pods_test.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pods

import (
	"fmt"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/test/integration"
	"k8s.io/kubernetes/test/integration/framework"
)

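// TestPodUpdateActiveDeadlineSeconds verifies the validation rules for
// mutating spec.activeDeadlineSeconds on an existing pod: it may be set or
// shrunk, but never raised, zeroed, negated, or removed.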
func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	var (
		iZero = int64(0)
		i30   = int64(30)
		i60   = int64(60)
		iNeg  = int64(-1)
	)

	prototypePod := func() *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "xxx",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  "fake-name",
						Image: "fakeimage",
					},
				},
			},
		}
	}

	cases := []struct {
		name     string
		original *int64
		update   *int64
		valid    bool
	}{
		{
			name:     "no change, nil",
			original: nil,
			update:   nil,
			valid:    true,
		},
		{
			name:     "no change, set",
			original: &i30,
			update:   &i30,
			valid:    true,
		},
		{
			name:     "change to positive from nil",
			original: nil,
			update:   &i60,
			valid:    true,
		},
		{
			name:     "change to smaller positive",
			original: &i60,
			update:   &i30,
			valid:    true,
		},
		{
			name:     "change to larger positive",
			original: &i30,
			update:   &i60,
			valid:    false,
		},
		{
			name:     "change to negative from positive",
			original: &i30,
			update:   &iNeg,
			valid:    false,
		},
		{
			name:     "change to negative from nil",
			original: nil,
			update:   &iNeg,
			valid:    false,
		},
		// zero is not allowed, must be a positive integer
		{
			name:     "change to zero from positive",
			original: &i30,
			update:   &iZero,
			valid:    false,
		},
		{
			name:     "change to nil from positive",
			original: &i30,
			update:   nil,
			valid:    false,
		},
	}

	for i, tc := range cases {
		pod := prototypePod()
		pod.Spec.ActiveDeadlineSeconds = tc.original
		pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i)

		if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
			t.Errorf("Failed to create pod: %v", err)
		}

		pod.Spec.ActiveDeadlineSeconds = tc.update

		_, err := client.Core().Pods(ns.Name).Update(pod)
		if tc.valid && err != nil {
			t.Errorf("%v: failed to update pod: %v", tc.name, err)
		} else if !tc.valid && err == nil {
			t.Errorf("%v: unexpected allowed update to pod", tc.name)
		}

		integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
	}
}

func TestPodReadOnlyFilesystem(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	isReadOnly := true
	ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "xxx",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
					SecurityContext: &v1.SecurityContext{
						ReadOnlyRootFilesystem: &isReadOnly,
					},
				},
			},
		},
	}

	if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}

	integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}
54
vendor/k8s.io/kubernetes/test/integration/quota/BUILD
generated
vendored
Normal file
@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "quota_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/quota",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//pkg/controller/resourcequota:go_default_library",
        "//pkg/quota/generic:go_default_library",
        "//pkg/quota/install:go_default_library",
        "//plugin/pkg/admission/resourcequota:go_default_library",
        "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/quota/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
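// Illustrative sketch (not from the upstream file): framework.EtcdMain wraps
// the test binary so the whole package shares one etcd instance. Conceptually
// such a wrapper does something of this shape, where startEtcd is a
// hypothetical helper:
//
//	func EtcdMain(run func() int) {
//		stop := startEtcd() // hypothetical: boot a local etcd for the tests
//		code := run()       // execute the package's tests
//		stop()
//		os.Exit(code)
//	}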
371
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
Normal file
@ -0,0 +1,371 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    "k8s.io/kubernetes/pkg/controller"
    replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
    resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
    "k8s.io/kubernetes/pkg/quota/generic"
    quotainstall "k8s.io/kubernetes/pkg/quota/install"
    "k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
    resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
    "k8s.io/kubernetes/test/integration/framework"
)

// 1.2 code gets:
//	quota_test.go:95: Took 4.218619579s to scale up without quota
//	quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute)
// 1.3+ code gets:
//	quota_test.go:100: Took 4.196205966s to scale up without quota
//	quota_test.go:115: Took 12.021640372s to scale up with quota
func TestQuota(t *testing.T) {
    // Set up a master
    h := &framework.MasterHolder{Initialized: make(chan struct{})}
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        <-h.Initialized
        h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
    }))

    admissionCh := make(chan struct{})
    clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    config := &resourcequotaapi.Configuration{}
    admission, err := resourcequota.NewResourceQuota(config, 5, admissionCh)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    admission.SetInternalKubeClientSet(internalClientset)
    internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc())
    admission.SetInternalKubeInformerFactory(internalInformers)
    qca := quotainstall.NewQuotaConfigurationForAdmission()
    admission.SetQuotaConfiguration(qca)
    defer close(admissionCh)

    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.AdmissionControl = admission
    _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
    defer closeFn()

    ns := framework.CreateTestingNamespace("quotaed", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
    defer framework.DeleteTestingNamespace(ns2, s, t)

    controllerCh := make(chan struct{})
    defer close(controllerCh)

    informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
    rm := replicationcontroller.NewReplicationManager(
        informers.Core().V1().Pods(),
        informers.Core().V1().ReplicationControllers(),
        clientset,
        replicationcontroller.BurstReplicas,
    )
    rm.SetEventRecorder(&record.FakeRecorder{})
    go rm.Run(3, controllerCh)

    discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
    listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
    qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
    informersStarted := make(chan struct{})
    resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
        QuotaClient:               clientset.Core(),
        ResourceQuotaInformer:     informers.Core().V1().ResourceQuotas(),
        ResyncPeriod:              controller.NoResyncPeriodFunc,
        InformerFactory:           informers,
        ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
        DiscoveryFunc:             discoveryFunc,
        IgnoredResourcesFunc:      qc.IgnoredResources,
        InformersStarted:          informersStarted,
        Registry:                  generic.NewRegistry(qc.Evaluators()),
    }
    resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
    if err != nil {
        t.Fatalf("unexpected err: %v", err)
    }
    go resourceQuotaController.Run(2, controllerCh)

    // Periodically resync the quota controller to detect new resource types
    go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh)

    internalInformers.Start(controllerCh)
    informers.Start(controllerCh)
    close(informersStarted)

    startTime := time.Now()
    scale(t, ns2.Name, clientset)
    endTime := time.Now()
    t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))

    quota := &v1.ResourceQuota{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "quota",
            Namespace: ns.Name,
        },
        Spec: v1.ResourceQuotaSpec{
            Hard: v1.ResourceList{
                v1.ResourcePods: resource.MustParse("1000"),
            },
        },
    }
    waitForQuota(t, quota, clientset)

    startTime = time.Now()
    scale(t, "quotaed", clientset)
    endTime = time.Now()
    t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
}
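// Illustrative sketch (not from the upstream file): the test above measures
// quota overhead by scaling the same RC in a quota'd and a non-quota'd
// namespace. A minimal client-side probe that quota status has been
// reconciled, using only calls already imported in this file:
//
//	q, err := clientset.Core().ResourceQuotas(ns.Name).Get("quota", metav1.GetOptions{})
//	if err == nil && len(q.Status.Hard) > 0 {
//		// the quota controller has populated status; admission can now enforce it
//	}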

func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
    w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name}))
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    _, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
        switch event.Type {
        case watch.Modified:
        default:
            return false, nil
        }
        switch cast := event.Object.(type) {
        case *v1.ResourceQuota:
            if len(cast.Status.Hard) > 0 {
                return true, nil
            }
        }

        return false, nil
    })
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}

func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
    target := int32(100)
    rc := &v1.ReplicationController{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "foo",
            Namespace: namespace,
        },
        Spec: v1.ReplicationControllerSpec{
            Replicas: &target,
            Selector: map[string]string{"foo": "bar"},
            Template: &v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{
                        "foo": "bar",
                    },
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {
                            Name:  "container",
                            Image: "busybox",
                        },
                    },
                },
            },
        },
    }

    w, err := clientset.Core().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name}))
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    _, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
        switch event.Type {
        case watch.Modified:
        default:
            return false, nil
        }

        switch cast := event.Object.(type) {
        case *v1.ReplicationController:
            fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
            if cast.Status.Replicas == target {
                return true, nil
            }
        }

        return false, nil
    })
    if err != nil {
        pods, _ := clientset.Core().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
        t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
    }
}
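// Design note with a sketch (not from the upstream file): both helpers above
// open the watch *before* issuing Create, so no Modified event can slip in
// between the two calls; metav1.SingleObject scopes the watch to one name.
// The reusable idiom, assuming a predicate cond func(watch.Event) (bool, error):
//
//	w, err := clientset.Core().ReplicationControllers(ns).Watch(
//		metav1.SingleObject(metav1.ObjectMeta{Name: name}))
//	if err == nil {
//		if _, err = clientset.Core().ReplicationControllers(ns).Create(rc); err == nil {
//			_, err = watch.Until(timeout, w, cond)
//		}
//	}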

func TestQuotaLimitedResourceDenial(t *testing.T) {
    // Set up a master
    h := &framework.MasterHolder{Initialized: make(chan struct{})}
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        <-h.Initialized
        h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
    }))

    admissionCh := make(chan struct{})
    clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

    // stop creation of a pod resource unless there is a quota
    config := &resourcequotaapi.Configuration{
        LimitedResources: []resourcequotaapi.LimitedResource{
            {
                Resource:      "pods",
                MatchContains: []string{"pods"},
            },
        },
    }
    qca := quotainstall.NewQuotaConfigurationForAdmission()
    admission, err := resourcequota.NewResourceQuota(config, 5, admissionCh)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    admission.SetInternalKubeClientSet(internalClientset)
    internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc())
    admission.SetInternalKubeInformerFactory(internalInformers)
    admission.SetQuotaConfiguration(qca)
    defer close(admissionCh)

    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.AdmissionControl = admission
    _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
    defer closeFn()

    ns := framework.CreateTestingNamespace("quota", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    controllerCh := make(chan struct{})
    defer close(controllerCh)

    informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
    rm := replicationcontroller.NewReplicationManager(
        informers.Core().V1().Pods(),
        informers.Core().V1().ReplicationControllers(),
        clientset,
        replicationcontroller.BurstReplicas,
    )
    rm.SetEventRecorder(&record.FakeRecorder{})
    go rm.Run(3, controllerCh)

    discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
    listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
    qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
    informersStarted := make(chan struct{})
    resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
        QuotaClient:               clientset.Core(),
        ResourceQuotaInformer:     informers.Core().V1().ResourceQuotas(),
        ResyncPeriod:              controller.NoResyncPeriodFunc,
        InformerFactory:           informers,
        ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
        DiscoveryFunc:             discoveryFunc,
        IgnoredResourcesFunc:      qc.IgnoredResources,
        InformersStarted:          informersStarted,
        Registry:                  generic.NewRegistry(qc.Evaluators()),
    }
    resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
    if err != nil {
        t.Fatalf("unexpected err: %v", err)
    }
    go resourceQuotaController.Run(2, controllerCh)

    // Periodically resync the quota controller to detect new resource types
    go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh)

    internalInformers.Start(controllerCh)
    informers.Start(controllerCh)
    close(informersStarted)

    // try to create a pod
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "foo",
            Namespace: ns.Name,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  "container",
                    Image: "busybox",
                },
            },
        },
    }
    if _, err := clientset.Core().Pods(ns.Name).Create(pod); err == nil {
        t.Fatalf("expected error for insufficient quota")
    }

    // now create a covering quota
    // note: limited resource does a matchContains, so we now have "pods" matching "pods" and "count/pods"
    quota := &v1.ResourceQuota{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "quota",
            Namespace: ns.Name,
        },
        Spec: v1.ResourceQuotaSpec{
            Hard: v1.ResourceList{
                v1.ResourcePods:               resource.MustParse("1000"),
                v1.ResourceName("count/pods"): resource.MustParse("1000"),
            },
        },
    }
    waitForQuota(t, quota, clientset)

    // attempt to create a new pod once the quota is propagated
    err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
        // retry until we succeed (to allow time for all changes to propagate)
        if _, err := clientset.Core().Pods(ns.Name).Create(pod); err == nil {
            return true, nil
        }
        return false, nil
    })
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}
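// Illustrative sketch (not from the upstream file): with the admission
// configuration used above, a LimitedResource gates creation of any resource
// whose quota'd name contains a MatchContains entry as a substring, which is
// why one entry covers both "pods" and "count/pods":
//
//	config := &resourcequotaapi.Configuration{
//		LimitedResources: []resourcequotaapi.LimitedResource{{
//			Resource:      "pods",
//			MatchContains: []string{"pods"}, // substring match: "pods", "count/pods"
//		}},
//	}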
51
vendor/k8s.io/kubernetes/test/integration/replicaset/BUILD
generated
vendored
Normal file
@ -0,0 +1,51 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "replicaset_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/replicaset",
    tags = ["integration"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/replicaset:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/replicaset/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package replicaset

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
1039
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
47
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/BUILD
generated
vendored
Normal file
@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "replicationcontroller_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/replicationcontroller",
    tags = ["integration"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package replicationcontroller

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
887
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/replicationcontroller_test.go
generated
vendored
Normal file
@ -0,0 +1,887 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package replicationcontroller

import (
    "fmt"
    "net/http/httptest"
    "reflect"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/retry"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    "k8s.io/kubernetes/pkg/controller/replication"
    "k8s.io/kubernetes/test/integration/framework"
)

const (
    interval = 100 * time.Millisecond
    timeout  = 60 * time.Second
)

func labelMap() map[string]string {
    return map[string]string{"foo": "bar"}
}

func newRC(name, namespace string, replicas int) *v1.ReplicationController {
    replicasCopy := int32(replicas)
    return &v1.ReplicationController{
        TypeMeta: metav1.TypeMeta{
            Kind:       "ReplicationController",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespace,
            Name:      name,
        },
        Spec: v1.ReplicationControllerSpec{
            Selector: labelMap(),
            Replicas: &replicasCopy,
            Template: &v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: labelMap(),
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {
                            Name:  "fake-name",
                            Image: "fakeimage",
                        },
                    },
                },
            },
        },
    }
}

func newMatchingPod(podName, namespace string) *v1.Pod {
    return &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      podName,
            Namespace: namespace,
            Labels:    labelMap(),
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  "fake-name",
                    Image: "fakeimage",
                },
            },
        },
        Status: v1.PodStatus{
            Phase: v1.PodRunning,
        },
    }
}

// verifyRemainingObjects verifies that the numbers of the remaining replication
// controllers and pods are rcNum and podNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
    rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
    podClient := clientSet.CoreV1().Pods(namespace)
    pods, err := podClient.List(metav1.ListOptions{})
    if err != nil {
        return false, fmt.Errorf("Failed to list pods: %v", err)
    }
    var ret = true
    if len(pods.Items) != podNum {
        ret = false
        t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
    }
    rcs, err := rcClient.List(metav1.ListOptions{})
    if err != nil {
        return false, fmt.Errorf("Failed to list replication controllers: %v", err)
    }
    if len(rcs.Items) != rcNum {
        ret = false
        t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
    }
    return ret, nil
}

func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    _, s, closeFn := framework.RunAMaster(masterConfig)

    config := restclient.Config{Host: s.URL}
    clientSet, err := clientset.NewForConfig(&config)
    if err != nil {
        t.Fatalf("Failed to create clientset: %v", err)
    }
    resyncPeriod := 12 * time.Hour
    informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rc-informers")), resyncPeriod)

    rm := replication.NewReplicationManager(
        informers.Core().V1().Pods(),
        informers.Core().V1().ReplicationControllers(),
        clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
        replication.BurstReplicas,
    )

    return s, closeFn, rm, informers, clientSet
}

func rmSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    _, s, closeFn := framework.RunAMaster(masterConfig)

    config := restclient.Config{Host: s.URL}
    clientSet, err := clientset.NewForConfig(&config)
    if err != nil {
        t.Fatalf("Failed to create clientset: %v", err)
    }
    return s, closeFn, clientSet
}

// runControllerAndInformers runs the RC controller and the informers
func runControllerAndInformers(t *testing.T, rm *replication.ReplicationManager, informers informers.SharedInformerFactory, podNum int) chan struct{} {
    stopCh := make(chan struct{})
    informers.Start(stopCh)
    waitToObservePods(t, informers.Core().V1().Pods().Informer(), podNum)
    go rm.Run(5, stopCh)
    return stopCh
}

// waitToObservePods waits for the podInformer to observe the pods. Call this
// function before running the RC controller to prevent the RC manager from
// creating new pods rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        objects := podInformer.GetIndexer().List()
        return len(objects) == podNum, nil
    }); err != nil {
        t.Fatalf("Error encountered when waiting for podInformer to observe the pods: %v", err)
    }
}

func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.ReplicationController, pods []*v1.Pod) ([]*v1.ReplicationController, []*v1.Pod) {
    var createdRCs []*v1.ReplicationController
    var createdPods []*v1.Pod
    for _, rc := range rcs {
        createdRC, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
        if err != nil {
            t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err)
        }
        createdRCs = append(createdRCs, createdRC)
    }
    for _, pod := range pods {
        createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod)
        if err != nil {
            t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
        }
        createdPods = append(createdPods, createdPod)
    }

    return createdRCs, createdPods
}
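// Illustrative sketch (not from the upstream file): the helpers above combine
// into the fixture that every test below opens with:
//
//	s, closeFn, rm, informers, c := rmSetup(t)
//	defer closeFn()
//	ns := framework.CreateTestingNamespace("example", s, t) // hypothetical name
//	defer framework.DeleteTestingNamespace(ns, s, t)
//	stopCh := runControllerAndInformers(t, rm, informers, 0)
//	defer close(stopCh)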

// waitRCStable verifies that .Status.Replicas is equal to .Spec.Replicas
func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
    rcClient := clientSet.CoreV1().ReplicationControllers(rc.Namespace)
    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return newRC.Status.Replicas == *rc.Spec.Replicas, nil
    }); err != nil {
        t.Fatalf("Failed to verify .Status.Replicas is equal to .Spec.Replicas for rc %s: %v", rc.Name, err)
    }
}

// scaleRC updates .Spec.Replicas to replicas and verifies that .Status.Replicas
// is changed accordingly
func scaleRC(t *testing.T, c clientset.Interface, rc *v1.ReplicationController, replicas int32) {
    rcClient := c.CoreV1().ReplicationControllers(rc.Namespace)
    rc = updateRC(t, rcClient, rc.Name, func(rc *v1.ReplicationController) {
        *rc.Spec.Replicas = replicas
    })
    waitRCStable(t, c, rc)
}

func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod {
    var pod *v1.Pod
    if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
        newPod, err := podClient.Get(podName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        updateFunc(newPod)
        pod, err = podClient.Update(newPod)
        return err
    }); err != nil {
        t.Fatalf("Failed to update pod %s: %v", podName, err)
    }
    return pod
}

func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, updateStatusFunc func(*v1.Pod)) {
    if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
        newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        updateStatusFunc(newPod)
        _, err = podClient.UpdateStatus(newPod)
        return err
    }); err != nil {
        t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
    }
}

func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList {
    podSelector := labels.Set(labelMap).AsSelector()
    options := metav1.ListOptions{LabelSelector: podSelector.String()}
    pods, err := podClient.List(options)
    if err != nil {
        t.Fatalf("Failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err)
    }
    return pods
}

func updateRC(t *testing.T, rcClient typedv1.ReplicationControllerInterface, rcName string, updateFunc func(*v1.ReplicationController)) *v1.ReplicationController {
    var rc *v1.ReplicationController
    if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
        newRC, err := rcClient.Get(rcName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        updateFunc(newRC)
        rc, err = rcClient.Update(newRC)
        return err
    }); err != nil {
        t.Fatalf("Failed to update rc %s: %v", rcName, err)
    }
    return rc
}
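// Illustrative sketch (not from the upstream file): the three update helpers
// above share one idiom, retry.RetryOnConflict, which re-reads the object and
// re-applies the mutation whenever the apiserver answers 409 Conflict:
//
//	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
//		obj, err := rcClient.Get("rc", metav1.GetOptions{}) // fresh resourceVersion
//		if err != nil {
//			return err
//		}
//		*obj.Spec.Replicas = 3 // any mutation
//		_, err = rcClient.Update(obj)
//		return err // a Conflict here triggers another round
//	})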

// testPodControllerRefPatch verifies that the ControllerRef of an RC pod with
// incorrect attributes is automatically patched by the RC
func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, ownerReference *metav1.OwnerReference, rc *v1.ReplicationController, expectedOwnerReferenceNum int) {
    ns := rc.Namespace
    podClient := c.CoreV1().Pods(ns)
    updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
        pod.OwnerReferences = []metav1.OwnerReference{*ownerReference}
    })

    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return metav1.GetControllerOf(newPod) != nil, nil
    }); err != nil {
        t.Fatalf("Failed to verify ControllerRef for the pod %s is not nil: %v", pod.Name, err)
    }

    newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Failed to obtain pod %s: %v", pod.Name, err)
    }
    controllerRef := metav1.GetControllerOf(newPod)
    if controllerRef.UID != rc.UID {
        t.Fatalf("RC owner of the pod %s has a different UID: Expected %v, got %v", newPod.Name, rc.UID, controllerRef.UID)
    }
    ownerReferenceNum := len(newPod.GetOwnerReferences())
    if ownerReferenceNum != expectedOwnerReferenceNum {
        t.Fatalf("Unexpected number of owner references for pod %s: Expected %d, got %d", newPod.Name, expectedOwnerReferenceNum, ownerReferenceNum)
    }
}

func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1.PodList, conditionStatus v1.ConditionStatus, lastTransitionTime time.Time) {
    replicas := int32(len(pods.Items))
    var readyPods int32
    err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        readyPods = 0
        for i := range pods.Items {
            pod := &pods.Items[i]
            if podutil.IsPodReady(pod) {
                readyPods++
                continue
            }
            pod.Status.Phase = v1.PodRunning
            _, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
            if condition != nil {
                condition.Status = conditionStatus
                condition.LastTransitionTime = metav1.Time{Time: lastTransitionTime}
            } else {
                condition = &v1.PodCondition{
                    Type:               v1.PodReady,
                    Status:             conditionStatus,
                    LastTransitionTime: metav1.Time{Time: lastTransitionTime},
                }
                pod.Status.Conditions = append(pod.Status.Conditions, *condition)
            }
            _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
            if err != nil {
                // When the status fails to be updated, we continue to the next pod
                continue
            }
            readyPods++
        }
        return readyPods >= replicas, nil
    })
    if err != nil {
        t.Fatalf("failed to mark all ReplicationController pods to ready: %v", err)
    }
}
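// Note with a sketch (not from the upstream file): backdating
// LastTransitionTime above matters because a pod only counts as available
// once its Ready condition has held for .Spec.MinReadySeconds. Conceptually
// the availability check reduces to:
//
//	ready := podutil.IsPodReady(pod)
//	heldFor := time.Since(condition.LastTransitionTime.Time)
//	available := ready && heldFor >= time.Duration(rc.Spec.MinReadySeconds)*time.Second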

func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v1.ReplicationController, replicas int32) {
    ns := rc.Namespace
    rcClient := c.CoreV1().ReplicationControllers(ns)
    newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err)
    }
    kind := "ReplicationController"
    scaleClient := c.ExtensionsV1beta1().Scales(ns)
    scale, err := scaleClient.Get(kind, rc.Name)
    if err != nil {
        t.Fatalf("Failed to obtain scale subresource for rc %s: %v", rc.Name, err)
    }
    if scale.Spec.Replicas != *newRC.Spec.Replicas {
        t.Fatalf("Scale subresource for rc %s does not match .Spec.Replicas: expected %d, got %d", rc.Name, *newRC.Spec.Replicas, scale.Spec.Replicas)
    }

    if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
        scale, err := scaleClient.Get(kind, rc.Name)
        if err != nil {
            return err
        }
        scale.Spec.Replicas = replicas
        _, err = scaleClient.Update(kind, scale)
        return err
    }); err != nil {
        t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rc %s: %v", rc.Name, err)
    }

    newRC, err = rcClient.Get(rc.Name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err)
    }
    if *newRC.Spec.Replicas != replicas {
        t.Fatalf(".Spec.Replicas of rc %s does not match its scale subresource: expected %d, got %d", rc.Name, replicas, *newRC.Spec.Replicas)
    }
}
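// Illustrative sketch (not from the upstream file): the scale subresource lets
// a client resize a workload without round-tripping the full object; the calls
// below mirror the ones used in the helper above:
//
//	scaleClient := c.ExtensionsV1beta1().Scales(ns)
//	scale, err := scaleClient.Get("ReplicationController", "rc")
//	if err == nil {
//		scale.Spec.Replicas = 5
//		_, err = scaleClient.Update("ReplicationController", scale)
//	}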

func TestAdoption(t *testing.T) {
    boolPtr := func(b bool) *bool { return &b }
    testCases := []struct {
        name                    string
        existingOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
        expectedOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
    }{
        {
            "pod refers rc as an owner, not a controller",
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"}}
            },
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
            },
        },
        {
            "pod doesn't have owner references",
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{}
            },
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
            },
        },
        {
            "pod refers rc as a controller",
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
            },
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
            },
        },
        {
            "pod refers other rc as the controller, refers the rc as an owner",
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{
                    {UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
                    {UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
                }
            },
            func(rc *v1.ReplicationController) []metav1.OwnerReference {
                return []metav1.OwnerReference{
                    {UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
                    {UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
                }
            },
        },
    }
    for i, tc := range testCases {
        func() {
            s, closeFn, rm, informers, clientSet := rmSetup(t)
            defer closeFn()
            ns := framework.CreateTestingNamespace(fmt.Sprintf("rc-adoption-%d", i), s, t)
            defer framework.DeleteTestingNamespace(ns, s, t)

            rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
            podClient := clientSet.CoreV1().Pods(ns.Name)
            const rcName = "rc"
            rc, err := rcClient.Create(newRC(rcName, ns.Name, 1))
            if err != nil {
                t.Fatalf("Failed to create replication controllers: %v", err)
            }
            podName := fmt.Sprintf("pod%d", i)
            pod := newMatchingPod(podName, ns.Name)
            pod.OwnerReferences = tc.existingOwnerReferences(rc)
            _, err = podClient.Create(pod)
            if err != nil {
                t.Fatalf("Failed to create Pod: %v", err)
            }

            stopCh := runControllerAndInformers(t, rm, informers, 1)
            defer close(stopCh)
            if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
                updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
                if err != nil {
                    return false, err
                }
                if e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
                    return true, nil
                } else {
                    t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
                    return false, nil
                }
            }); err != nil {
                t.Fatalf("test %q failed: %v", tc.name, err)
            }
        }()
    }
}
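// Illustrative sketch (not from the upstream file): every adoption case above
// reduces to the ControllerRef invariant that at most one owner reference may
// have Controller=true. The check used throughout this file:
//
//	if ref := metav1.GetControllerOf(pod); ref != nil && ref.UID == rc.UID {
//		// the RC is this pod's managing controller
//	}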

func TestSpecReplicasChange(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-spec-replicas-change", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    rc := newRC("rc", ns.Name, 2)
    rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
    rc = rcs[0]
    waitRCStable(t, c, rc)

    // Update .Spec.Replicas and verify .Status.Replicas is changed accordingly
    scaleRC(t, c, rc, 3)
    scaleRC(t, c, rc, 0)
    scaleRC(t, c, rc, 2)

    // Add a template annotation change to verify that the RC's status updates
    // even without a .Spec.Replicas change
    rcClient := c.CoreV1().ReplicationControllers(ns.Name)
    var oldGeneration int64
    newRC := updateRC(t, rcClient, rc.Name, func(rc *v1.ReplicationController) {
        oldGeneration = rc.Generation
        rc.Spec.Template.Annotations = map[string]string{"test": "annotation"}
    })
    savedGeneration := newRC.Generation
    if savedGeneration == oldGeneration {
        t.Fatalf("Failed to verify .Generation has incremented for rc %s", rc.Name)
    }

    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return newRC.Status.ObservedGeneration >= savedGeneration, nil
    }); err != nil {
        t.Fatalf("Failed to verify .Status.ObservedGeneration has incremented for rc %s: %v", rc.Name, err)
    }
}

func TestDeletingAndFailedPods(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-deleting-and-failed-pods", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    rc := newRC("rc", ns.Name, 2)
    rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
    rc = rcs[0]
    waitRCStable(t, c, rc)

    // Verify RC creates 2 pods
    podClient := c.CoreV1().Pods(ns.Name)
    pods := getPods(t, podClient, labelMap())
    if len(pods.Items) != 2 {
        t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
    }

    // Set first pod as deleting pod
    // Set finalizers for the pod to simulate pending deletion status
    deletingPod := &pods.Items[0]
    updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
        pod.Finalizers = []string{"fake.example.com/blockDeletion"}
    })
    if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil {
        t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err)
    }

    // Set second pod as failed pod
    failedPod := &pods.Items[1]
    updatePodStatus(t, podClient, failedPod, func(pod *v1.Pod) {
        pod.Status.Phase = v1.PodFailed
    })

    // Poll until 2 new pods have been created to replace deleting and failed pods
    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        pods = getPods(t, podClient, labelMap())
        return len(pods.Items) == 4, nil
    }); err != nil {
        t.Fatalf("Failed to verify 2 new pods have been created (expected 4 pods): %v", err)
    }

    // Verify deleting and failed pods are among the four pods
    foundDeletingPod := false
    foundFailedPod := false
    for _, pod := range pods.Items {
        if pod.UID == deletingPod.UID {
            foundDeletingPod = true
        }
        if pod.UID == failedPod.UID {
            foundFailedPod = true
        }
    }
    // Verify deleting pod exists
    if !foundDeletingPod {
        t.Fatalf("expected deleting pod %s exists, but it is not found", deletingPod.Name)
    }
    // Verify failed pod exists
    if !foundFailedPod {
        t.Fatalf("expected failed pod %s exists, but it is not found", failedPod.Name)
    }
}

func TestOverlappingRCs(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-overlapping-rcs", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    // Create 2 RCs with identical selectors
    for i := 0; i < 2; i++ {
        // One RC has 1 replica, and the other has 2 replicas
        rc := newRC(fmt.Sprintf("rc-%d", i+1), ns.Name, i+1)
        rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
        waitRCStable(t, c, rcs[0])
    }

    // Expect 3 total Pods to be created
    podClient := c.CoreV1().Pods(ns.Name)
    pods := getPods(t, podClient, labelMap())
    if len(pods.Items) != 3 {
        t.Errorf("len(pods) = %d, want 3", len(pods.Items))
    }

    // Expect both RCs to have .status.replicas = .spec.replicas
    for i := 0; i < 2; i++ {
        newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{})
        if err != nil {
            t.Fatalf("failed to obtain rc rc-%d: %v", i+1, err)
        }
        if newRC.Status.Replicas != *newRC.Spec.Replicas {
            t.Fatalf(".Status.Replicas %d is not equal to .Spec.Replicas %d", newRC.Status.Replicas, *newRC.Spec.Replicas)
        }
    }
}

func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-pod-orphaning-and-adoption-when-labels-change", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    rc := newRC("rc", ns.Name, 1)
    rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
    rc = rcs[0]
    waitRCStable(t, c, rc)

    // Orphaning: RC should remove OwnerReference from a pod when the pod's labels change to not match its labels
    podClient := c.CoreV1().Pods(ns.Name)
    pods := getPods(t, podClient, labelMap())
    if len(pods.Items) != 1 {
        t.Fatalf("len(pods) = %d, want 1", len(pods.Items))
    }
    pod := &pods.Items[0]

    // Start by verifying ControllerRef for the pod is not nil
    if metav1.GetControllerOf(pod) == nil {
        t.Fatalf("ControllerRef of pod %s is nil", pod.Name)
    }
    newLabelMap := map[string]string{"new-foo": "new-bar"}
    updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
        pod.Labels = newLabelMap
    })
    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        pod = newPod
        return metav1.GetControllerOf(newPod) == nil, nil
    }); err != nil {
        t.Fatalf("Failed to verify ControllerRef for the pod %s is nil: %v", pod.Name, err)
    }

    // Adoption: RC should add ControllerRef to a pod when the pod's labels change to match its labels
    updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
        pod.Labels = labelMap()
    })
    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            // If the pod is not found, the RC has picked the pod for deletion (it was the extra one)
            // Verify there is only one pod in the namespace and it has a ControllerRef to the RC
            if errors.IsNotFound(err) {
                pods := getPods(t, podClient, labelMap())
                if len(pods.Items) != 1 {
                    return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items))
                }
                // Set the pod accordingly
                pod = &pods.Items[0]
                return true, nil
            }
            return false, err
        }
        // Always update the pod so that we can save a GET call to the API server later
        pod = newPod
        // If the pod is found, verify the pod has a ControllerRef
        return metav1.GetControllerOf(newPod) != nil, nil
    }); err != nil {
        t.Fatalf("Failed to verify ControllerRef for pod %s is not nil: %v", pod.Name, err)
    }
    // Verify the pod has a ControllerRef to the RC
    // Do nothing if the pod is nil (i.e., has been picked for deletion)
    if pod != nil {
        controllerRef := metav1.GetControllerOf(pod)
        if controllerRef.UID != rc.UID {
            t.Fatalf("RC owner of the pod %s has a different UID: Expected %v, got %v", pod.Name, rc.UID, controllerRef.UID)
        }
    }
}

func TestGeneralPodAdoption(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-general-pod-adoption", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    rc := newRC("rc", ns.Name, 1)
    rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
    rc = rcs[0]
    waitRCStable(t, c, rc)

    podClient := c.CoreV1().Pods(ns.Name)
    pods := getPods(t, podClient, labelMap())
    if len(pods.Items) != 1 {
        t.Fatalf("len(pods) = %d, want 1", len(pods.Items))
    }
    pod := &pods.Items[0]
    var falseVar = false

    // When the only OwnerReference of the pod points to another type of API object such as statefulset
    // with Controller=false, the RC should add a second OwnerReference (ControllerRef) pointing to itself
    // with Controller=true
    ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: rc.Name, Controller: &falseVar}
    testPodControllerRefPatch(t, c, pod, &ownerReference, rc, 2)

    // When the only OwnerReference of the pod points to the RC, but Controller=false
    ownerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &falseVar}
    testPodControllerRefPatch(t, c, pod, &ownerReference, rc, 1)
}

func TestReadyAndAvailableReplicas(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-ready-and-available-replicas", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    rc := newRC("rc", ns.Name, 3)
    rc.Spec.MinReadySeconds = 3600
    rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
    rc = rcs[0]
    waitRCStable(t, c, rc)

    // First verify no pod is available
    if rc.Status.AvailableReplicas != 0 {
        t.Fatalf("Unexpected .Status.AvailableReplicas: Expected 0, saw %d", rc.Status.AvailableReplicas)
    }

    podClient := c.CoreV1().Pods(ns.Name)
    pods := getPods(t, podClient, labelMap())
    if len(pods.Items) != 3 {
        t.Fatalf("len(pods) = %d, want 3", len(pods.Items))
    }

    // Separate 3 pods into their own list
    firstPodList := &v1.PodList{Items: pods.Items[:1]}
    secondPodList := &v1.PodList{Items: pods.Items[1:2]}
    thirdPodList := &v1.PodList{Items: pods.Items[2:]}
    // First pod: Running, but not Ready
    // by setting the Ready condition to false with LastTransitionTime to be now
    setPodsReadyCondition(t, c, firstPodList, v1.ConditionFalse, time.Now())
    // Second pod: Running and Ready, but not Available
    // by setting LastTransitionTime to now
    setPodsReadyCondition(t, c, secondPodList, v1.ConditionTrue, time.Now())
    // Third pod: Running, Ready, and Available
    // by setting LastTransitionTime to more than 3600 seconds ago
    setPodsReadyCondition(t, c, thirdPodList, v1.ConditionTrue, time.Now().Add(-120*time.Minute))

    rcClient := c.CoreV1().ReplicationControllers(ns.Name)
    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        // Verify 3 pods exist, 2 pods are Ready, and 1 pod is Available
        return newRC.Status.Replicas == 3 && newRC.Status.ReadyReplicas == 2 && newRC.Status.AvailableReplicas == 1, nil
    }); err != nil {
        t.Fatalf("Failed to verify number of Replicas, ReadyReplicas and AvailableReplicas of rc %s to be as expected: %v", rc.Name, err)
    }
}

func TestRCScaleSubresource(t *testing.T) {
    s, closeFn, rm, informers, c := rmSetup(t)
    defer closeFn()
    ns := framework.CreateTestingNamespace("test-rc-scale-subresource", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
    stopCh := runControllerAndInformers(t, rm, informers, 0)
    defer close(stopCh)

    rc := newRC("rc", ns.Name, 1)
    rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
    rc = rcs[0]
    waitRCStable(t, c, rc)

    // Use the scale subresource to scale up .Spec.Replicas to 3
    testScalingUsingScaleSubresource(t, c, rc, 3)
    // Use the scale subresource to scale down .Spec.Replicas to 0
    testScalingUsingScaleSubresource(t, c, rc, 0)
}
|
||||
|
||||
func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
|
||||
s, closeFn, rm, informers, c := rmSetup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("test-extra-pods-adoption-and-deletion", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
rc := newRC("rc", ns.Name, 2)
|
||||
// Create 3 pods, RC should adopt only 2 of them
|
||||
podList := []*v1.Pod{}
|
||||
for i := 0; i < 3; i++ {
|
||||
pod := newMatchingPod(fmt.Sprintf("pod-%d", i+1), ns.Name)
|
||||
pod.Labels = labelMap()
|
||||
podList = append(podList, pod)
|
||||
}
|
||||
rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, podList)
|
||||
rc = rcs[0]
|
||||
stopCh := runControllerAndInformers(t, rm, informers, 3)
|
||||
defer close(stopCh)
|
||||
waitRCStable(t, c, rc)
|
||||
|
||||
// Verify the extra pod is deleted eventually by determining whether number of
|
||||
// all pods within namespace matches .spec.replicas of the RC (2 in this case)
|
||||
podClient := c.CoreV1().Pods(ns.Name)
|
||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
// All pods have labelMap as their labels
|
||||
pods := getPods(t, podClient, labelMap())
|
||||
return int32(len(pods.Items)) == *rc.Spec.Replicas, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed to verify number of all pods within current namespace matches .spec.replicas of rc %s: %v", rc.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFullyLabeledReplicas(t *testing.T) {
|
||||
s, closeFn, rm, informers, c := rmSetup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("test-fully-labeled-replicas", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
stopCh := runControllerAndInformers(t, rm, informers, 0)
|
||||
defer close(stopCh)
|
||||
|
||||
extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
|
||||
rc := newRC("rc", ns.Name, 2)
|
||||
rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
|
||||
rc = rcs[0]
|
||||
waitRCStable(t, c, rc)
|
||||
|
||||
// Change RC's template labels to have extra labels, but not its selector
|
||||
rcClient := c.CoreV1().ReplicationControllers(ns.Name)
|
||||
updateRC(t, rcClient, rc.Name, func(rc *v1.ReplicationController) {
|
||||
rc.Spec.Template.Labels = extraLabelMap
|
||||
})
|
||||
|
||||
// Set one of the pods to have extra labels
|
||||
podClient := c.CoreV1().Pods(ns.Name)
|
||||
pods := getPods(t, podClient, labelMap())
|
||||
if len(pods.Items) != 2 {
|
||||
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
|
||||
}
|
||||
fullyLabeledPod := &pods.Items[0]
|
||||
updatePod(t, podClient, fullyLabeledPod.Name, func(pod *v1.Pod) {
|
||||
pod.Labels = extraLabelMap
|
||||
})
|
||||
|
||||
// Verify only one pod is fully labeled
|
||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return (newRC.Status.Replicas == 2 && newRC.Status.FullyLabeledReplicas == 1), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed to verify only one pod is fully labeled: %v", err)
|
||||
}
|
||||
}

38
vendor/k8s.io/kubernetes/test/integration/scale/BUILD
generated
vendored
Normal file
@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = ["scale_test.go"],
    importpath = "k8s.io/kubernetes/test/integration/scale",
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

241
vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scale

import (
	"encoding/json"
	"path"
	"strings"
	"testing"

	"github.com/coreos/pkg/capnslog"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
	apitesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

type subresourceTest struct {
	resource schema.GroupVersionResource
	kind     schema.GroupVersionKind
}

func makeGVR(group, version, resource string) schema.GroupVersionResource {
	return schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
}
func makeGVK(group, version, kind string) schema.GroupVersionKind {
	return schema.GroupVersionKind{Group: group, Version: version, Kind: kind}
}

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}

func TestScaleSubresources(t *testing.T) {
	clientSet, tearDown := setup(t)
	defer tearDown()

	resourceLists, err := clientSet.Discovery().ServerResources()
	if err != nil {
		t.Fatal(err)
	}

	expectedScaleSubresources := map[schema.GroupVersionResource]schema.GroupVersionKind{
		makeGVR("", "v1", "replicationcontrollers/scale"): makeGVK("autoscaling", "v1", "Scale"),

		makeGVR("extensions", "v1beta1", "deployments/scale"):            makeGVK("extensions", "v1beta1", "Scale"),
		makeGVR("extensions", "v1beta1", "replicationcontrollers/scale"): makeGVK("extensions", "v1beta1", "Scale"),
		makeGVR("extensions", "v1beta1", "replicasets/scale"):            makeGVK("extensions", "v1beta1", "Scale"),

		makeGVR("apps", "v1beta1", "deployments/scale"):  makeGVK("apps", "v1beta1", "Scale"),
		makeGVR("apps", "v1beta1", "statefulsets/scale"): makeGVK("apps", "v1beta1", "Scale"),

		makeGVR("apps", "v1beta2", "deployments/scale"):  makeGVK("apps", "v1beta2", "Scale"),
		makeGVR("apps", "v1beta2", "replicasets/scale"):  makeGVK("apps", "v1beta2", "Scale"),
		makeGVR("apps", "v1beta2", "statefulsets/scale"): makeGVK("apps", "v1beta2", "Scale"),

		makeGVR("apps", "v1", "deployments/scale"):  makeGVK("autoscaling", "v1", "Scale"),
		makeGVR("apps", "v1", "replicasets/scale"):  makeGVK("autoscaling", "v1", "Scale"),
		makeGVR("apps", "v1", "statefulsets/scale"): makeGVK("autoscaling", "v1", "Scale"),
	}

	autoscalingGVK := schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"}

	discoveredScaleSubresources := map[schema.GroupVersionResource]schema.GroupVersionKind{}
	for _, resourceList := range resourceLists {
		containingGV, err := schema.ParseGroupVersion(resourceList.GroupVersion)
		if err != nil {
			t.Fatalf("error getting group version for %#v: %v", resourceList, err)
		}

		for _, resource := range resourceList.APIResources {
			if !strings.HasSuffix(resource.Name, "/scale") {
				continue
			}

			gvr := containingGV.WithResource(resource.Name)
			if _, exists := discoveredScaleSubresources[gvr]; exists {
				t.Errorf("scale subresource %#v listed multiple times in discovery", gvr)
				continue
			}

			gvk := containingGV.WithKind(resource.Kind)
			if resource.Group != "" {
				gvk.Group = resource.Group
			}
			if resource.Version != "" {
				gvk.Version = resource.Version
			}
			discoveredScaleSubresources[gvr] = gvk
		}
	}

	// Ensure nothing is missing
	for gvr, gvk := range expectedScaleSubresources {
		if _, ok := discoveredScaleSubresources[gvr]; !ok {
			t.Errorf("expected scale subresource %#v of kind %#v was missing from discovery", gvr, gvk)
		}
	}

	// Ensure discovery lists expected types
	for gvr, gvk := range discoveredScaleSubresources {
		if expectedGVK, expected := expectedScaleSubresources[gvr]; !expected {
			if gvk == autoscalingGVK {
				t.Errorf("unexpected scale subresource %#v of kind %#v. new scale subresource should be added to expectedScaleSubresources", gvr, gvk)
			} else {
				t.Errorf("unexpected scale subresource %#v of kind %#v. new scale resources are expected to use Scale from the autoscaling/v1 API group", gvr, gvk)
			}
			continue
		} else if expectedGVK != gvk {
			t.Errorf("scale subresource %#v should be of kind %#v, but %#v was listed in discovery", gvr, expectedGVK, gvk)
			continue
		}
	}

	// Create objects required to exercise scale subresources
	if _, err := clientSet.CoreV1().ReplicationControllers("default").Create(rcStub); err != nil {
		t.Fatal(err)
	}
	if _, err := clientSet.AppsV1beta2().ReplicaSets("default").Create(rsStub); err != nil {
		t.Fatal(err)
	}
	if _, err := clientSet.AppsV1beta2().Deployments("default").Create(deploymentStub); err != nil {
		t.Fatal(err)
	}
	if _, err := clientSet.AppsV1beta2().StatefulSets("default").Create(ssStub); err != nil {
		t.Fatal(err)
	}

	// Ensure scale subresources return and accept expected kinds
	for gvr, gvk := range discoveredScaleSubresources {
		prefix := "/apis"
		if gvr.Group == corev1.GroupName {
			prefix = "/api"
		}

		resourceParts := strings.SplitN(gvr.Resource, "/", 2)

		urlPath := path.Join(prefix, gvr.Group, gvr.Version, "namespaces", "default", resourceParts[0], "test", resourceParts[1])
		obj := &unstructured.Unstructured{}

		getData, err := clientSet.CoreV1().RESTClient().Get().AbsPath(urlPath).DoRaw()
		if err != nil {
			t.Errorf("error fetching %s: %v", urlPath, err)
			continue
		}
		if err := json.Unmarshal(getData, obj); err != nil {
			t.Errorf("error decoding %s: %v", urlPath, err)
			t.Log(string(getData))
			continue
		}

		if obj.GetObjectKind().GroupVersionKind() != gvk {
			t.Errorf("expected %#v, got %#v from %s", gvk, obj.GetObjectKind().GroupVersionKind(), urlPath)
			t.Log(string(getData))
			continue
		}

		updateData, err := clientSet.CoreV1().RESTClient().Put().AbsPath(urlPath).Body(getData).DoRaw()
		if err != nil {
			t.Errorf("error putting to %s: %v", urlPath, err)
			t.Log(string(getData))
			t.Log(string(updateData))
			continue
		}
	}
}

var (
	replicas = int32(1)

	podStub = corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
		Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "test", Image: "busybox"}}},
	}

	rcStub = &corev1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: "test"},
		Spec:       corev1.ReplicationControllerSpec{Selector: podStub.Labels, Replicas: &replicas, Template: &podStub},
	}

	rsStub = &appsv1beta2.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{Name: "test"},
		Spec:       appsv1beta2.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: podStub.Labels}, Replicas: &replicas, Template: podStub},
	}

	deploymentStub = &appsv1beta2.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "test"},
		Spec:       appsv1beta2.DeploymentSpec{Selector: &metav1.LabelSelector{MatchLabels: podStub.Labels}, Replicas: &replicas, Template: podStub},
	}

	ssStub = &appsv1beta2.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "test"},
		Spec:       appsv1beta2.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: podStub.Labels}, Replicas: &replicas, Template: podStub},
	}
)

func setup(t *testing.T) (client kubernetes.Interface, tearDown func()) {
	result := apitesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())

	// TODO: Disable logging here until we resolve teardown issues which result in
	// massive log spam. Another path forward would be to refactor
	// StartTestServerOrDie to work with the etcd instance already started by the
	// integration test scripts.
	// See https://github.com/kubernetes/kubernetes/issues/49489.
	repo, err := capnslog.GetRepoLogger("github.com/coreos/etcd")
	if err != nil {
		t.Fatalf("couldn't configure logging: %v", err)
	}
	repo.SetLogLevel(map[string]capnslog.LogLevel{
		"etcdserver/api/v3rpc": capnslog.CRITICAL,
	})

	result.ClientConfig.AcceptContentTypes = ""
	result.ClientConfig.ContentType = ""
	result.ClientConfig.NegotiatedSerializer = nil
	clientSet, err := kubernetes.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("error creating clientset: %v", err)
	}

	return clientSet, result.TearDownFn
}

109
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
Normal file
@ -0,0 +1,109 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "extender_test.go",
        "main_test.go",
        "predicates_test.go",
        "preemption_test.go",
        "priorities_test.go",
        "scheduler_test.go",
        "taint_test.go",
        "volume_binding_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/scheduler",
    library = ":go_default_library",
    tags = ["integration"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller/node:go_default_library",
        "//pkg/controller/node/ipam:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
        "//plugin/cmd/kube-scheduler/app:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
        "//plugin/pkg/scheduler/api:go_default_library",
        "//plugin/pkg/scheduler/core:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["util.go"],
    importpath = "k8s.io/kubernetes/test/integration/scheduler",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/v1/pod:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",
        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

4
vendor/k8s.io/kubernetes/test/integration/scheduler/OWNERS
generated
vendored
Normal file
@ -0,0 +1,4 @@
approvers:
- sig-scheduling-maintainers
reviewers:
- sig-scheduling

452
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go
generated
vendored
Normal file
@ -0,0 +1,452 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

// This file tests scheduler extender.

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	e2e "k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/integration/framework"
)

const (
	filter     = "filter"
	prioritize = "prioritize"
	bind       = "bind"
)

type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)

type priorityConfig struct {
	function priorityFunc
	weight   int
}

// Extender simulates a scheduler extender: an out-of-process HTTP service the
// scheduler calls out to for filtering, prioritizing, and (optionally) binding.
type Extender struct {
	name             string
	predicates       []fitPredicate
	prioritizers     []priorityConfig
	nodeCacheCapable bool
	Client           clientset.Interface
}

func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Request) {
	decoder := json.NewDecoder(req.Body)
	defer req.Body.Close()

	encoder := json.NewEncoder(w)

	if strings.Contains(req.URL.Path, filter) || strings.Contains(req.URL.Path, prioritize) {
		var args schedulerapi.ExtenderArgs

		if err := decoder.Decode(&args); err != nil {
			http.Error(w, "Decode error", http.StatusBadRequest)
			return
		}

		if strings.Contains(req.URL.Path, filter) {
			resp := &schedulerapi.ExtenderFilterResult{}
			resp, err := e.Filter(&args)
			if err != nil {
				resp.Error = err.Error()
			}

			if err := encoder.Encode(resp); err != nil {
				t.Fatalf("Failed to encode %v", resp)
			}
		} else if strings.Contains(req.URL.Path, prioritize) {
			// Prioritize errors are ignored. Default k8s priorities or another extender's
			// priorities may be applied.
			priorities, _ := e.Prioritize(&args)

			if err := encoder.Encode(priorities); err != nil {
				t.Fatalf("Failed to encode %+v", priorities)
			}
		}
	} else if strings.Contains(req.URL.Path, bind) {
		var args schedulerapi.ExtenderBindingArgs

		if err := decoder.Decode(&args); err != nil {
			http.Error(w, "Decode error", http.StatusBadRequest)
			return
		}

		resp := &schedulerapi.ExtenderBindingResult{}

		if err := e.Bind(&args); err != nil {
			resp.Error = err.Error()
		}

		if err := encoder.Encode(resp); err != nil {
			t.Fatalf("Failed to encode %+v", resp)
		}
	} else {
		http.Error(w, "Unknown method", http.StatusNotFound)
	}
}

// filterUsingNodeCache runs the extender's predicates against bare node names,
// the way a node-cache-capable extender is expected to, returning the names
// that fit and a failure map for those that do not.
func (e *Extender) filterUsingNodeCache(args *schedulerapi.ExtenderArgs) (*schedulerapi.ExtenderFilterResult, error) {
	nodeSlice := make([]string, 0)
	failedNodesMap := schedulerapi.FailedNodesMap{}
	for _, nodeName := range *args.NodeNames {
		fits := true
		for _, predicate := range e.predicates {
			fit, err := predicate(&args.Pod,
				&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
			if err != nil {
				return &schedulerapi.ExtenderFilterResult{
					Nodes:       nil,
					NodeNames:   nil,
					FailedNodes: schedulerapi.FailedNodesMap{},
					Error:       err.Error(),
				}, err
			}
			if !fit {
				fits = false
				break
			}
		}
		if fits {
			nodeSlice = append(nodeSlice, nodeName)
		} else {
			failedNodesMap[nodeName] = fmt.Sprintf("extender failed: %s", e.name)
		}
	}

	return &schedulerapi.ExtenderFilterResult{
		Nodes:       nil,
		NodeNames:   &nodeSlice,
		FailedNodes: failedNodesMap,
	}, nil
}

func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.ExtenderFilterResult, error) {
	filtered := []v1.Node{}
	failedNodesMap := schedulerapi.FailedNodesMap{}

	if e.nodeCacheCapable {
		return e.filterUsingNodeCache(args)
	} else {
		for _, node := range args.Nodes.Items {
			fits := true
			for _, predicate := range e.predicates {
				fit, err := predicate(&args.Pod, &node)
				if err != nil {
					return &schedulerapi.ExtenderFilterResult{
						Nodes:       &v1.NodeList{},
						NodeNames:   nil,
						FailedNodes: schedulerapi.FailedNodesMap{},
						Error:       err.Error(),
					}, err
				}
				if !fit {
					fits = false
					break
				}
			}
			if fits {
				filtered = append(filtered, node)
			} else {
				failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
			}
		}

		return &schedulerapi.ExtenderFilterResult{
			Nodes:       &v1.NodeList{Items: filtered},
			NodeNames:   nil,
			FailedNodes: failedNodesMap,
		}, nil
	}
}

func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.HostPriorityList, error) {
	result := schedulerapi.HostPriorityList{}
	combinedScores := map[string]int{}
	var nodes = &v1.NodeList{Items: []v1.Node{}}

	if e.nodeCacheCapable {
		for _, nodeName := range *args.NodeNames {
			nodes.Items = append(nodes.Items, v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
		}
	} else {
		nodes = args.Nodes
	}

	for _, prioritizer := range e.prioritizers {
		weight := prioritizer.weight
		if weight == 0 {
			continue
		}
		priorityFunc := prioritizer.function
		prioritizedList, err := priorityFunc(&args.Pod, nodes)
		if err != nil {
			return &schedulerapi.HostPriorityList{}, err
		}
		for _, hostEntry := range *prioritizedList {
			combinedScores[hostEntry.Host] += hostEntry.Score * weight
		}
	}
	for host, score := range combinedScores {
		result = append(result, schedulerapi.HostPriority{Host: host, Score: score})
	}
	return &result, nil
}

func (e *Extender) Bind(binding *schedulerapi.ExtenderBindingArgs) error {
	b := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: binding.PodNamespace, Name: binding.PodName, UID: binding.PodUID},
		Target: v1.ObjectReference{
			Kind: "Node",
			Name: binding.Node,
		},
	}

	return e.Client.CoreV1().Pods(b.Namespace).Bind(b)
}

func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
	if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
		return true, nil
	}
	return false, nil
}

func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
	if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
		return true, nil
	}
	return false, nil
}

func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
	result := schedulerapi.HostPriorityList{}
	for _, node := range nodes.Items {
		score := 1
		if node.Name == "machine2" {
			score = 10
		}
		result = append(result, schedulerapi.HostPriority{
			Host:  node.Name,
			Score: score,
		})
	}
	return &result, nil
}

func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
	result := schedulerapi.HostPriorityList{}
	for _, node := range nodes.Items {
		score := 1
		if node.Name == "machine3" {
			score = 10
		}
		result = append(result, schedulerapi.HostPriority{
			Host:  node.Name,
			Score: score,
		})
	}
	return &result, nil
}

func TestSchedulerExtender(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})

	extender1 := &Extender{
		name:         "extender1",
		predicates:   []fitPredicate{machine_1_2_3_Predicate},
		prioritizers: []priorityConfig{{machine_2_Prioritizer, 1}},
	}
	es1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		extender1.serveHTTP(t, w, req)
	}))
	defer es1.Close()

	extender2 := &Extender{
		name:         "extender2",
		predicates:   []fitPredicate{machine_2_3_5_Predicate},
		prioritizers: []priorityConfig{{machine_3_Prioritizer, 1}},
		Client:       clientSet,
	}
	es2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		extender2.serveHTTP(t, w, req)
	}))
	defer es2.Close()

	extender3 := &Extender{
		name:             "extender3",
		predicates:       []fitPredicate{machine_1_2_3_Predicate},
		prioritizers:     []priorityConfig{{machine_2_Prioritizer, 5}},
		nodeCacheCapable: true,
	}
	es3 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		extender3.serveHTTP(t, w, req)
	}))
	defer es3.Close()

	policy := schedulerapi.Policy{
		ExtenderConfigs: []schedulerapi.ExtenderConfig{
			{
				URLPrefix:      es1.URL,
				FilterVerb:     filter,
				PrioritizeVerb: prioritize,
				Weight:         3,
				EnableHttps:    false,
			},
			{
				URLPrefix:      es2.URL,
				FilterVerb:     filter,
				PrioritizeVerb: prioritize,
				BindVerb:       bind,
				Weight:         4,
				EnableHttps:    false,
			},
			{
				URLPrefix:        es3.URL,
				FilterVerb:       filter,
				PrioritizeVerb:   prioritize,
				Weight:           10,
				EnableHttps:      false,
				NodeCacheCapable: true,
			},
		},
	}
	policy.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()

	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
	schedulerConfigFactory := factory.NewConfigFactory(
		v1.DefaultSchedulerName,
		clientSet,
		informerFactory.Core().V1().Nodes(),
		informerFactory.Core().V1().Pods(),
		informerFactory.Core().V1().PersistentVolumes(),
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		enableEquivalenceCache,
	)
	schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.CoreV1().RESTClient()).Events("")})
	scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
	informerFactory.Start(schedulerConfig.StopEverything)
	scheduler.Run()

	defer close(schedulerConfig.StopEverything)

	DoTestPodScheduling(ns, t, clientSet)
}

func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
	// NOTE: This test cannot run in parallel, because it is creating and deleting
	// non-namespaced objects (Nodes).
	defer cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})

	goodCondition := v1.NodeCondition{
		Type:              v1.NodeReady,
		Status:            v1.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: metav1.Time{Time: time.Now()},
	}
	node := &v1.Node{
		Spec: v1.NodeSpec{Unschedulable: false},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []v1.NodeCondition{goodCondition},
		},
	}

	for ii := 0; ii < 5; ii++ {
		node.Name = fmt.Sprintf("machine%d", ii+1)
		if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
			t.Fatalf("Failed to create nodes: %v", err)
		}
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "extender-test-pod"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
		},
	}

	myPod, err := cs.CoreV1().Pods(ns.Name).Create(pod)
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
	if err != nil {
		t.Fatalf("Failed to schedule pod: %v", err)
	}

	myPod, err = cs.CoreV1().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get pod: %v", err)
	} else if myPod.Spec.NodeName != "machine2" {
		t.Fatalf("Failed to schedule using extender, expected machine2, got %v", myPod.Spec.NodeName)
	}
	var gracePeriod int64
	if err := cs.CoreV1().Pods(ns.Name).Delete(myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
		t.Fatalf("Failed to delete pod: %v", err)
	}
	_, err = cs.CoreV1().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{})
	if err == nil {
		t.Fatalf("Failed to delete pod: pod %s still exists", myPod.Name)
	}
	t.Logf("Scheduled pod using extenders")
}

27
vendor/k8s.io/kubernetes/test/integration/scheduler/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}

872
vendor/k8s.io/kubernetes/test/integration/scheduler/predicates_test.go
generated
vendored
Normal file
@ -0,0 +1,872 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/utils"
)

// This file tests the scheduler predicates functionality.

const pollInterval = 100 * time.Millisecond

// TestInterPodAffinity verifies that the scheduler's inter-pod affinity and
// anti-affinity predicate functions work correctly.
func TestInterPodAffinity(t *testing.T) {
|
||||
context := initTest(t, "inter-pod-affinity")
|
||||
defer cleanupTest(t, context)
|
||||
// Add a few nodes.
|
||||
nodes, err := createNodes(context.clientSet, "testnode", nil, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create nodes: %v", err)
|
||||
}
|
||||
// Add labels to the nodes.
|
||||
labels1 := map[string]string{
|
||||
"region": "r1",
|
||||
"zone": "z11",
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if err = testutils.AddLabelsToNode(context.clientSet, node.Name, labels1); err != nil {
|
||||
t.Fatalf("Cannot add labels to node: %v", err)
|
||||
}
|
||||
if err = waitForNodeLabels(context.clientSet, node.Name, labels1); err != nil {
|
||||
t.Fatalf("Adding labels to node didn't succeed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
cs := context.clientSet
|
||||
podLabel := map[string]string{"service": "securityscan"}
|
||||
// podLabel2 := map[string]string{"security": "S1"}
|
||||
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
node *v1.Node
|
||||
fits bool
|
||||
errorType string
|
||||
test string
|
||||
}{
|
||||
/*{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "security",
|
||||
Operator: metav1.LabelSelectorOpDoesNotExist,
|
||||
Values: []string{"securityscan"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
node: nodes[0],
|
||||
fits: false,
|
||||
errorType: "invalidPod",
|
||||
test: "validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "security",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
node: nodes[0],
|
||||
fits: false,
|
||||
test: "validates that Inter-pod-Affinity is respected if not matching",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name,
|
||||
},
|
||||
},
|
||||
},
|
||||
node: nodes[0],
|
||||
fits: true,
|
||||
test: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{"securityscan3", "value3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel}}},
|
||||
node: nodes[0],
|
||||
fits: true,
|
||||
test: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
Namespaces: []string{"diff-namespace"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel, Namespace: "ns"}}},
|
||||
node: nodes[0],
|
||||
fits: false,
|
||||
test: "validates that inter-pod-affinity is respected when pods have different Namespaces",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"antivirusscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel}}},
|
||||
node: nodes[0],
|
||||
fits: false,
|
||||
test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpExists,
|
||||
}, {
|
||||
Key: "wrongkey",
|
||||
Operator: metav1.LabelSelectorOpDoesNotExist,
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
}, {
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan"},
|
||||
}, {
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{"WrongValue"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel}}},
|
||||
node: nodes[0],
|
||||
fits: true,
|
||||
test: "validates that InterPodAffinity is respected if matching with multiple affinities in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Name: "fakename",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpExists,
|
||||
}, {
|
||||
Key: "wrongkey",
|
||||
Operator: metav1.LabelSelectorOpDoesNotExist,
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
}, {
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan2"},
|
||||
}, {
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{"WrongValue"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel}}},
|
||||
node: nodes[0],
|
||||
fits: false,
|
||||
test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression items doesn't match.",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"antivirusscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "node",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel}}},
|
||||
node: nodes[0],
|
||||
fits: true,
|
||||
test: "validates that InterPod Affinity and AntiAffinity is respected if matching",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"antivirusscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "node",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name,
|
||||
Affinity: &v1.Affinity{
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"antivirusscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "node",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel},
|
||||
},
|
||||
},
|
||||
node: nodes[0],
|
||||
fits: true,
|
||||
test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename",
|
||||
Labels: podLabel2,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "region",
|
||||
},
|
||||
},
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"securityscan", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "zone",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
|
||||
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fakename2",
|
||||
Labels: podLabel}}},
|
||||
node: nodes[0],
|
||||
fits: false,
|
||||
test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
|
||||
},*/
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "fakename",
					Labels: podLabel,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
					Affinity: &v1.Affinity{
						PodAffinity: &v1.PodAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key: "service",
												Operator: metav1.LabelSelectorOpIn,
												Values: []string{"securityscan", "value2"},
											},
										},
									},
									TopologyKey: "region",
								},
							},
						},
						PodAntiAffinity: &v1.PodAntiAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key: "service",
												Operator: metav1.LabelSelectorOpIn,
												Values: []string{"antivirusscan", "value2"},
											},
										},
									},
									TopologyKey: "node",
								},
							},
						},
					},
				},
			},
			pods: []*v1.Pod{
				{
					Spec: v1.PodSpec{
						NodeName: nodes[0].Name,
						Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
						Affinity: &v1.Affinity{
							PodAntiAffinity: &v1.PodAntiAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
									{
										LabelSelector: &metav1.LabelSelector{
											MatchExpressions: []metav1.LabelSelectorRequirement{
												{
													Key: "service",
													Operator: metav1.LabelSelectorOpIn,
													Values: []string{"securityscan", "value3"},
												},
											},
										},
										TopologyKey: "zone",
									},
								},
							},
						},
					},
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakename2",
						Labels: podLabel},
				},
			},
			node: nodes[0],
			fits: false,
			test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "fakename",
					Labels: podLabel,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
					Affinity: &v1.Affinity{
						PodAffinity: &v1.PodAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key: "service",
												Operator: metav1.LabelSelectorOpNotIn,
												Values: []string{"securityscan", "value2"},
											},
										},
									},
									TopologyKey: "region",
								},
							},
						},
					},
				},
			},
			pods: []*v1.Pod{{Spec: v1.PodSpec{
				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
				NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{
				Name: "fakename2",
				Labels: podLabel}}},
			node: nodes[0],
			fits: false,
			test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "fakename",
					Labels: podLabel,
				},
				Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
			},
			pods: []*v1.Pod{
				{
					Spec: v1.PodSpec{NodeName: nodes[0].Name,
						Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
						Affinity: &v1.Affinity{
							PodAntiAffinity: &v1.PodAntiAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
									{
										LabelSelector: &metav1.LabelSelector{
											MatchExpressions: []metav1.LabelSelectorRequirement{
												{
													Key: "service",
													Operator: metav1.LabelSelectorOpIn,
													Values: []string{"securityscan", "value2"},
												},
											},
										},
										TopologyKey: "zone",
									},
								},
							},
						},
					},
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakename2",
						Labels: podLabel},
				},
			},
			node: nodes[0],
			fits: false,
			test: "Verify that PodAntiAffinity of an existing pod is respected when PodAntiAffinity symmetry is not satisfied with the existing pod",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "fake-name",
					Labels: podLabel,
				},
				Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
			},
			pods: []*v1.Pod{
				{
					Spec: v1.PodSpec{NodeName: nodes[0].Name,
						Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
						Affinity: &v1.Affinity{
							PodAntiAffinity: &v1.PodAntiAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
									{
										LabelSelector: &metav1.LabelSelector{
											MatchExpressions: []metav1.LabelSelectorRequirement{
												{
													Key: "service",
													Operator: metav1.LabelSelectorOpNotIn,
													Values: []string{"securityscan", "value2"},
												},
											},
										},
										TopologyKey: "zone",
									},
								},
							},
						},
					},
					ObjectMeta: metav1.ObjectMeta{
						Name: "fake-name2",
						Labels: podLabel},
				},
			},
			node: nodes[0],
			fits: true,
			test: "Verify that PodAntiAffinity from existing pod is respected when pod satisfies PodAntiAffinity symmetry with the existing pod",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "fake-name2"},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
					NodeSelector: map[string]string{"region": "r1"},
					Affinity: &v1.Affinity{
						PodAntiAffinity: &v1.PodAntiAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key: "foo",
												Operator: metav1.LabelSelectorOpIn,
												Values: []string{"abc"},
											},
										},
									},
									TopologyKey: "region",
								},
							},
						},
					},
				},
			},
			pods: []*v1.Pod{
				{Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
					NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{Name: "fakename", Labels: map[string]string{"foo": "abc"}}},
			},
			fits: false,
			test: "nodes[0] and nodes[1] have same topologyKey and label value. nodes[0] has an existing pod that matches the inter pod affinity rule. The new pod can not be scheduled onto either of the two nodes.",
		},
	}

	for _, test := range tests {
		for _, pod := range test.pods {
			var nsName string
			if pod.Namespace != "" {
				nsName = pod.Namespace
			} else {
				nsName = context.ns.Name
			}
			createdPod, err := cs.CoreV1().Pods(nsName).Create(pod)
			if err != nil {
				t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
			}
			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
			if err != nil {
				t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test.test)
			}
		}
		testPod, err := cs.CoreV1().Pods(context.ns.Name).Create(test.pod)
		if err != nil {
			if !(test.errorType == "invalidPod" && errors.IsInvalid(err)) {
				t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
			}
		}

		if test.fits {
			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, testPod.Namespace, testPod.Name))
		} else {
			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
		}
		if err != nil {
			t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
		}

		err = cs.CoreV1().Pods(context.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0))
		if err != nil {
			t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
		}
		err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, context.ns.Name, test.pod.Name))
		if err != nil {
			t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
		}
		for _, pod := range test.pods {
			var nsName string
			if pod.Namespace != "" {
				nsName = pod.Namespace
			} else {
				nsName = context.ns.Name
			}
			err = cs.CoreV1().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0))
			if err != nil {
				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
			}
			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, nsName, pod.Name))
			if err != nil {
				t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
			}
		}
	}
}
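
// The podScheduled, podUnschedulable, and podDeleted helpers polled above are
// defined in the shared scheduler test utilities rather than in this file. As
// an illustrative sketch only (an assumption based on the names, not the
// vendored implementation), such a wait.ConditionFunc builder typically looks
// like this; it is left as a comment so the package still compiles:
//
//	func podScheduled(c clientset.Interface, namespace, name string) wait.ConditionFunc {
//		return func() (bool, error) {
//			pod, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
//			if err != nil {
//				// Keep polling on transient errors.
//				return false, nil
//			}
//			// A pod counts as scheduled once it has been bound to a node.
//			return pod.Spec.NodeName != "", nil
//		}
//	}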
769
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go
generated
vendored
Normal file
@ -0,0 +1,769 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file tests preemption functionality of the scheduler.

package scheduler

import (
	"fmt"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/features"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/core"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)

var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)

func waitForNominatedNodeAnnotation(cs clientset.Interface, pod *v1.Pod) error {
	if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		annot, found := pod.Annotations[core.NominatedNodeAnnotationKey]
		if found && len(annot) > 0 {
			return true, nil
		}
		return false, err
	}); err != nil {
		return fmt.Errorf("Pod %v annotation did not get set: %v", pod.Name, err)
	}
	return nil
}

// TestPreemption tests a few preemption scenarios.
func TestPreemption(t *testing.T) {
	// Enable PodPriority feature gate.
	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
	// Initialize scheduler.
	context := initTest(t, "preemption")
	defer cleanupTest(t, context)
	cs := context.clientSet

	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
	}

	tests := []struct {
		description string
		existingPods []*v1.Pod
		pod *v1.Pod
		preemptedPodIndexes map[int]struct{}
	}{
		{
			description: "basic pod preemption",
			existingPods: []*v1.Pod{
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "victim-pod",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
						v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
					},
				}),
			},
			pod: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
				},
			}),
			preemptedPodIndexes: map[int]struct{}{0: {}},
		},
		{
			description: "preemption is performed to satisfy anti-affinity",
			existingPods: []*v1.Pod{
				initPausePod(cs, &pausePodConfig{
					Name: "pod-0", Namespace: context.ns.Name,
					Priority: &mediumPriority,
					Labels: map[string]string{"pod": "p0"},
					Resources: defaultPodRes,
				}),
				initPausePod(cs, &pausePodConfig{
					Name: "pod-1", Namespace: context.ns.Name,
					Priority: &lowPriority,
					Labels: map[string]string{"pod": "p1"},
					Resources: defaultPodRes,
					Affinity: &v1.Affinity{
						PodAntiAffinity: &v1.PodAntiAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key: "pod",
												Operator: metav1.LabelSelectorOpIn,
												Values: []string{"preemptor"},
											},
										},
									},
									TopologyKey: "node",
								},
							},
						},
					},
				}),
			},
			// A higher priority pod with anti-affinity.
			pod: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Labels: map[string]string{"pod": "preemptor"},
				Resources: defaultPodRes,
				Affinity: &v1.Affinity{
					PodAntiAffinity: &v1.PodAntiAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
							{
								LabelSelector: &metav1.LabelSelector{
									MatchExpressions: []metav1.LabelSelectorRequirement{
										{
											Key: "pod",
											Operator: metav1.LabelSelectorOpIn,
											Values: []string{"p0"},
										},
									},
								},
								TopologyKey: "node",
							},
						},
					},
				},
			}),
			preemptedPodIndexes: map[int]struct{}{0: {}, 1: {}},
		},
		{
			// This is similar to the previous case only pod-1 is high priority.
			description: "preemption is not performed when anti-affinity is not satisfied",
			existingPods: []*v1.Pod{
				initPausePod(cs, &pausePodConfig{
					Name: "pod-0", Namespace: context.ns.Name,
					Priority: &mediumPriority,
					Labels: map[string]string{"pod": "p0"},
					Resources: defaultPodRes,
				}),
				initPausePod(cs, &pausePodConfig{
					Name: "pod-1", Namespace: context.ns.Name,
					Priority: &highPriority,
					Labels: map[string]string{"pod": "p1"},
					Resources: defaultPodRes,
					Affinity: &v1.Affinity{
						PodAntiAffinity: &v1.PodAntiAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key: "pod",
												Operator: metav1.LabelSelectorOpIn,
												Values: []string{"preemptor"},
											},
										},
									},
									TopologyKey: "node",
								},
							},
						},
					},
				}),
			},
			// A higher priority pod with anti-affinity.
			pod: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Labels: map[string]string{"pod": "preemptor"},
				Resources: defaultPodRes,
				Affinity: &v1.Affinity{
					PodAntiAffinity: &v1.PodAntiAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
							{
								LabelSelector: &metav1.LabelSelector{
									MatchExpressions: []metav1.LabelSelectorRequirement{
										{
											Key: "pod",
											Operator: metav1.LabelSelectorOpIn,
											Values: []string{"p0"},
										},
									},
								},
								TopologyKey: "node",
							},
						},
					},
				},
			}),
			preemptedPodIndexes: map[int]struct{}{},
		},
	}

	// Create a node with some resources and a label.
	nodeRes := &v1.ResourceList{
		v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
		v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
	}
	node, err := createNode(context.clientSet, "node1", nodeRes)
	if err != nil {
		t.Fatalf("Error creating nodes: %v", err)
	}
	nodeLabels := map[string]string{"node": node.Name}
	if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil {
		t.Fatalf("Cannot add labels to node: %v", err)
	}
	if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil {
		t.Fatalf("Adding labels to node didn't succeed: %v", err)
	}

	for _, test := range tests {
		pods := make([]*v1.Pod, len(test.existingPods))
		// Create and run existingPods.
		for i, p := range test.existingPods {
			pods[i], err = runPausePod(cs, p)
			if err != nil {
				t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
			}
		}
		// Create the "pod".
		preemptor, err := createPausePod(cs, test.pod)
		if err != nil {
			t.Errorf("Error while creating high priority pod: %v", err)
		}
		// Wait for preemption of pods and make sure the other ones are not preempted.
		for i, p := range pods {
			if _, found := test.preemptedPodIndexes[i]; found {
				if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
					t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name)
				}
			} else {
				if p.DeletionTimestamp != nil {
					t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name)
				}
			}
		}
		// Also check that the preemptor pod gets the annotation for nominated node name.
		if len(test.preemptedPodIndexes) > 0 {
			if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
				t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
			}
		}

		// Cleanup
		pods = append(pods, preemptor)
		cleanupPods(cs, t, pods)
	}
}
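
// podIsGettingEvicted, used in the polling loop above, also comes from the
// shared scheduler test utilities. A minimal sketch, assuming the helper only
// needs to observe a deletion timestamp on the victim (this is an
// illustration, not the vendored implementation, so it is kept as a comment):
//
//	func podIsGettingEvicted(c clientset.Interface, namespace, name string) wait.ConditionFunc {
//		return func() (bool, error) {
//			pod, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
//			if err != nil {
//				return false, err
//			}
//			// A pod being preempted gets a deletion timestamp before it disappears.
//			return pod.DeletionTimestamp != nil, nil
//		}
//	}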

func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
	}
	pod := initPausePod(tc.clientSet, &pausePodConfig{
		Name: name,
		Namespace: tc.ns.Name,
		Priority: &priority,
		Labels: map[string]string{"pod": name},
		Resources: defaultPodRes,
	})
	// Setting grace period to zero. Otherwise, we may never see the actual deletion
	// of the pods in integration tests.
	pod.Spec.TerminationGracePeriodSeconds = &grace
	return pod
}

// This test ensures that while the preempting pod is waiting for the victims to
// terminate, other pending lower priority pods are not scheduled into the space
// created by the preemption while the higher priority pod is not yet scheduled.
func TestPreemptionStarvation(t *testing.T) {
	// Enable PodPriority feature gate.
	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
	// Initialize scheduler.
	context := initTest(t, "preemption")
	defer cleanupTest(t, context)
	cs := context.clientSet

	tests := []struct {
		description string
		numExistingPod int
		numExpectedPending int
		preemptor *v1.Pod
	}{
		{
			// This test ensures that while the preempting pod is waiting for the victims to
			// terminate, other lower priority pods are not scheduled into the space created
			// by the preemption while the higher priority pod is not yet scheduled.
			description: "starvation test: higher priority pod is scheduled before the lower priority ones",
			numExistingPod: 10,
			numExpectedPending: 5,
			preemptor: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
				},
			}),
		},
	}

	// Create a node with some resources and a label.
	nodeRes := &v1.ResourceList{
		v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
		v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
	}
	_, err := createNode(context.clientSet, "node1", nodeRes)
	if err != nil {
		t.Fatalf("Error creating nodes: %v", err)
	}

	for _, test := range tests {
		pendingPods := make([]*v1.Pod, test.numExpectedPending)
		numRunningPods := test.numExistingPod - test.numExpectedPending
		runningPods := make([]*v1.Pod, numRunningPods)
		// Create and run existingPods.
		for i := 0; i < numRunningPods; i++ {
			runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
			if err != nil {
				t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
			}
		}
		// make sure that runningPods are all scheduled.
		for _, p := range runningPods {
			if err := waitForPodToSchedule(cs, p); err != nil {
				t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err)
			}
		}
		// Create pending pods.
		for i := 0; i < test.numExpectedPending; i++ {
			pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
			if err != nil {
				t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
			}
		}
		// Make sure that all pending pods are being marked unschedulable.
		for _, p := range pendingPods {
			if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
				podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
				t.Errorf("Pod %v didn't get marked unschedulable: %v", p.Name, err)
			}
		}
		// Create the preemptor.
		preemptor, err := createPausePod(cs, test.preemptor)
		if err != nil {
			t.Errorf("Error while creating the preempting pod: %v", err)
		}
		// Check that the preemptor pod gets the annotation for nominated node name.
		if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
			t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
		}
		// Make sure that preemptor is scheduled after preemptions.
		if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
			t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
		}
		// Cleanup
		glog.Info("Cleaning up all pods...")
		allPods := pendingPods
		allPods = append(allPods, runningPods...)
		allPods = append(allPods, preemptor)
		cleanupPods(cs, t, allPods)
	}
}
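
// waitForPodToSchedule and waitForPodToScheduleWithTimeout, used above, wrap
// the same polling pattern with a fixed and a caller-chosen timeout. A hedged
// sketch of how the timeout variant could be composed from podScheduled
// (illustrative only; the real helpers live in the shared test framework):
//
//	func waitForPodToScheduleWithTimeout(c clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
//		// Poll frequently so short integration-test timeouts are not missed.
//		return wait.Poll(100*time.Millisecond, timeout, podScheduled(c, pod.Namespace, pod.Name))
//	}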

// TestNominatedNodeCleanUp checks that when there are nominated pods on a
// node and a higher priority pod is nominated to run on the node, the nominated
// node name of the lower priority pods is cleared.
// Test scenario:
// 1. Create a few low priority pods with a long grace period that fill up a node.
// 2. Create a medium priority pod that preempts some of those pods.
// 3. Check that the nominated node name of the medium priority pod is set.
// 4. Create a high priority pod that preempts some pods on that node.
// 5. Check that the nominated node name of the high priority pod is set and the
// nominated node name of the medium priority pod is cleared.
func TestNominatedNodeCleanUp(t *testing.T) {
	// Enable PodPriority feature gate.
	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
	// Initialize scheduler.
	context := initTest(t, "preemption")
	defer cleanupTest(t, context)
	cs := context.clientSet
	// Create a node with some resources and a label.
	nodeRes := &v1.ResourceList{
		v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
		v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
	}
	_, err := createNode(context.clientSet, "node1", nodeRes)
	if err != nil {
		t.Fatalf("Error creating nodes: %v", err)
	}

	// Step 1. Create a few low priority pods.
	lowPriPods := make([]*v1.Pod, 4)
	for i := 0; i < len(lowPriPods); i++ {
		lowPriPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("lpod-%v", i), lowPriority, 60))
		if err != nil {
			t.Fatalf("Error creating pause pod: %v", err)
		}
	}
	// make sure that the pods are all scheduled.
	for _, p := range lowPriPods {
		if err := waitForPodToSchedule(cs, p); err != nil {
			t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err)
		}
	}
	// Step 2. Create a medium priority pod.
	podConf := initPausePod(cs, &pausePodConfig{
		Name: "medium-priority",
		Namespace: context.ns.Name,
		Priority: &mediumPriority,
		Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
			v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(400, resource.BinarySI)},
		},
	})
	medPriPod, err := createPausePod(cs, podConf)
	if err != nil {
		t.Errorf("Error while creating the medium priority pod: %v", err)
	}
	// Step 3. Check that nominated node name of the medium priority pod is set.
	if err := waitForNominatedNodeAnnotation(cs, medPriPod); err != nil {
		t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
	}
	// Step 4. Create a high priority pod.
	podConf = initPausePod(cs, &pausePodConfig{
		Name: "high-priority",
		Namespace: context.ns.Name,
		Priority: &highPriority,
		Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
			v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
		},
	})
	highPriPod, err := createPausePod(cs, podConf)
	if err != nil {
		t.Errorf("Error while creating the high priority pod: %v", err)
	}
	// Step 5. Check that nominated node name of the high priority pod is set.
	if err := waitForNominatedNodeAnnotation(cs, highPriPod); err != nil {
		t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", highPriPod.Name, err)
	}
	// And the nominated node name of the medium priority pod is cleared.
	if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(medPriPod.Namespace).Get(medPriPod.Name, metav1.GetOptions{})
		if err != nil {
			t.Errorf("Error getting the medium priority pod info: %v", err)
		}
		n, found := pod.Annotations[core.NominatedNodeAnnotationKey]
		if !found || len(n) == 0 {
			return true, nil
		}
		return false, err
	}); err != nil {
		t.Errorf("The nominated node name of the medium priority pod was not cleared: %v", err)
	}
}

func mkMinAvailablePDB(name, namespace string, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
	intMinAvailable := intstr.FromInt(minAvailable)
	return &policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: &intMinAvailable,
			Selector: &metav1.LabelSelector{MatchLabels: matchLabels},
		},
	}
}
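
// For reference, mkMinAvailablePDB("pdb-1", ns, 2, map[string]string{"foo": "bar"})
// builds the in-memory equivalent of a manifest roughly like the following
// (shown as a readability sketch, not generated output):
//
//	apiVersion: policy/v1beta1
//	kind: PodDisruptionBudget
//	metadata:
//	  name: pdb-1
//	spec:
//	  minAvailable: 2
//	  selector:
//	    matchLabels:
//	      foo: "bar"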

// TestPDBInPreemption tests PodDisruptionBudget support in preemption.
func TestPDBInPreemption(t *testing.T) {
	// Enable PodPriority feature gate.
	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
	// Initialize scheduler.
	context := initTest(t, "preemption-pdb")
	defer cleanupTest(t, context)
	cs := context.clientSet

	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
	}
	defaultNodeRes := &v1.ResourceList{
		v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
		v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
	}

	type nodeConfig struct {
		name string
		res *v1.ResourceList
	}

	tests := []struct {
		description string
		nodes []*nodeConfig
		pdbs []*policy.PodDisruptionBudget
		existingPods []*v1.Pod
		pod *v1.Pod
		preemptedPodIndexes map[int]struct{}
	}{
		{
			description: "A non-PDB violating pod is preempted despite its higher priority",
			nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
			pdbs: []*policy.PodDisruptionBudget{
				mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo": "bar"}),
			},
			existingPods: []*v1.Pod{
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod1",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					Labels: map[string]string{"foo": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod2",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					Labels: map[string]string{"foo": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "mid-pod3",
					Namespace: context.ns.Name,
					Priority: &mediumPriority,
					Resources: defaultPodRes,
				}),
			},
			pod: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
				},
			}),
			preemptedPodIndexes: map[int]struct{}{2: {}},
		},
		{
			description: "A node without any PDB violating pods is preferred for preemption",
			nodes: []*nodeConfig{
				{name: "node-1", res: defaultNodeRes},
				{name: "node-2", res: defaultNodeRes},
			},
			pdbs: []*policy.PodDisruptionBudget{
				mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo": "bar"}),
			},
			existingPods: []*v1.Pod{
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod1",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					NodeName: "node-1",
					Labels: map[string]string{"foo": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "mid-pod2",
					Namespace: context.ns.Name,
					Priority: &mediumPriority,
					NodeName: "node-2",
					Resources: defaultPodRes,
				}),
			},
			pod: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
				},
			}),
			preemptedPodIndexes: map[int]struct{}{1: {}},
		},
		{
			description: "A node with fewer PDB violating pods is preferred for preemption",
			nodes: []*nodeConfig{
				{name: "node-1", res: defaultNodeRes},
				{name: "node-2", res: defaultNodeRes},
				{name: "node-3", res: defaultNodeRes},
			},
			pdbs: []*policy.PodDisruptionBudget{
				mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo1": "bar"}),
				mkMinAvailablePDB("pdb-2", context.ns.Name, 2, map[string]string{"foo2": "bar"}),
			},
			existingPods: []*v1.Pod{
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod1",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					NodeName: "node-1",
					Labels: map[string]string{"foo1": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "mid-pod1",
					Namespace: context.ns.Name,
					Priority: &mediumPriority,
					Resources: defaultPodRes,
					NodeName: "node-1",
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod2",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					NodeName: "node-2",
					Labels: map[string]string{"foo2": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "mid-pod2",
					Namespace: context.ns.Name,
					Priority: &mediumPriority,
					Resources: defaultPodRes,
					NodeName: "node-2",
					Labels: map[string]string{"foo2": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod4",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					NodeName: "node-3",
					Labels: map[string]string{"foo2": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod5",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					NodeName: "node-3",
					Labels: map[string]string{"foo2": "bar"},
				}),
				initPausePod(context.clientSet, &pausePodConfig{
					Name: "low-pod6",
					Namespace: context.ns.Name,
					Priority: &lowPriority,
					Resources: defaultPodRes,
					NodeName: "node-3",
					Labels: map[string]string{"foo2": "bar"},
				}),
			},
			pod: initPausePod(cs, &pausePodConfig{
				Name: "preemptor-pod",
				Namespace: context.ns.Name,
				Priority: &highPriority,
				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
				},
			}),
			preemptedPodIndexes: map[int]struct{}{0: {}, 1: {}},
		},
	}

	for _, test := range tests {
		for _, nodeConf := range test.nodes {
			_, err := createNode(cs, nodeConf.name, nodeConf.res)
			if err != nil {
				t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
			}
		}
		// Create PDBs.
		for _, pdb := range test.pdbs {
			_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
			if err != nil {
				t.Fatalf("Failed to create PDB: %v", err)
			}
		}
		// Wait for PDBs to show up in the scheduler's cache.
		if err := wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
			cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
			if err != nil {
				t.Errorf("Error while polling for PDB: %v", err)
				return false, err
			}
			return len(cachedPDBs) == len(test.pdbs), err
		}); err != nil {
			t.Fatalf("Not all PDBs were added to the cache: %v", err)
		}

		pods := make([]*v1.Pod, len(test.existingPods))
		var err error
		// Create and run existingPods.
		for i, p := range test.existingPods {
			if pods[i], err = runPausePod(cs, p); err != nil {
				t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
			}
		}
		// Create the "pod".
		preemptor, err := createPausePod(cs, test.pod)
		if err != nil {
			t.Errorf("Error while creating high priority pod: %v", err)
		}
		// Wait for preemption of pods and make sure the other ones are not preempted.
		for i, p := range pods {
			if _, found := test.preemptedPodIndexes[i]; found {
				if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
					t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name)
				}
			} else {
				if p.DeletionTimestamp != nil {
					t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name)
				}
			}
		}
		// Also check that the preemptor pod gets the annotation for nominated node name.
		if len(test.preemptedPodIndexes) > 0 {
			if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
				t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
			}
		}

		// Cleanup
		pods = append(pods, preemptor)
		cleanupPods(cs, t, pods)
		cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
		cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
	}
}
174
vendor/k8s.io/kubernetes/test/integration/scheduler/priorities_test.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	testutils "k8s.io/kubernetes/test/utils"
)

// This file tests the scheduler priority functions.

// TestNodeAffinity verifies that scheduler's node affinity priority function
// works correctly.
func TestNodeAffinity(t *testing.T) {
	context := initTest(t, "node-affinity")
	defer cleanupTest(t, context)
	// Add a few nodes.
	nodes, err := createNodes(context.clientSet, "testnode", nil, 5)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	// Add a label to one of the nodes.
	labeledNode := nodes[1]
	labelKey := "kubernetes.io/node-topologyKey"
	labelValue := "topologyvalue"
	labels := map[string]string{
		labelKey: labelValue,
	}
	if err = testutils.AddLabelsToNode(context.clientSet, labeledNode.Name, labels); err != nil {
		t.Fatalf("Cannot add labels to node: %v", err)
	}
	if err = waitForNodeLabels(context.clientSet, labeledNode.Name, labels); err != nil {
		t.Fatalf("Adding labels to node didn't succeed: %v", err)
	}
	// Create a pod with node affinity.
	podName := "pod-with-node-affinity"
	pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
		Name: podName,
		Namespace: context.ns.Name,
		Affinity: &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
					{
						Preference: v1.NodeSelectorTerm{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key: labelKey,
									Operator: v1.NodeSelectorOpIn,
									Values: []string{labelValue},
								},
							},
						},
						Weight: 20,
					},
				},
			},
		},
	}))
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}
	if pod.Spec.NodeName != labeledNode.Name {
		t.Errorf("Pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, labeledNode.Name)
	} else {
		t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
	}
}
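
// waitForNodeLabels, used above and in TestPodAffinity below, blocks until the
// node object observed through the API server carries the requested labels. A
// minimal sketch, assuming simple polling via the clientset (illustrative, not
// the vendored helper, hence kept as a comment):
//
//	func waitForNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) error {
//		return wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
//			node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
//			if err != nil {
//				return false, err
//			}
//			// Succeed only once every requested label is visible on the node.
//			for k, v := range labels {
//				if node.Labels[k] != v {
//					return false, nil
//				}
//			}
//			return true, nil
//		})
//	}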

// TestPodAffinity verifies that scheduler's pod affinity priority function
// works correctly.
func TestPodAffinity(t *testing.T) {
	context := initTest(t, "pod-affinity")
	defer cleanupTest(t, context)
	// Add a few nodes.
	nodesInTopology, err := createNodes(context.clientSet, "in-topology", nil, 5)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	topologyKey := "node-topologykey"
	topologyValue := "topologyvalue"
	nodeLabels := map[string]string{
		topologyKey: topologyValue,
	}
	for _, node := range nodesInTopology {
		// Add topology key to all the nodes.
		if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil {
			t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
		}
		if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil {
			t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
		}
	}
	// Add a pod with a label and wait for it to schedule.
	labelKey := "service"
	labelValue := "S1"
	_, err = runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
		Name: "attractor-pod",
		Namespace: context.ns.Name,
		Labels: map[string]string{labelKey: labelValue},
	}))
	if err != nil {
		t.Fatalf("Error running the attractor pod: %v", err)
	}
	// Add a few more nodes without the topology label.
	_, err = createNodes(context.clientSet, "other-node", nil, 5)
	if err != nil {
		t.Fatalf("Cannot create the second set of nodes: %v", err)
	}
	// Add a new pod with affinity to the attractor pod.
	podName := "pod-with-podaffinity"
	pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
		Name: podName,
		Namespace: context.ns.Name,
		Affinity: &v1.Affinity{
			PodAffinity: &v1.PodAffinity{
				PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
					{
						PodAffinityTerm: v1.PodAffinityTerm{
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key: labelKey,
										Operator: metav1.LabelSelectorOpIn,
										Values: []string{labelValue, "S3"},
									},
									{
										Key: labelKey,
										Operator: metav1.LabelSelectorOpNotIn,
										Values: []string{"S2"},
									}, {
										Key: labelKey,
										Operator: metav1.LabelSelectorOpExists,
									},
								},
							},
							TopologyKey: topologyKey,
							Namespaces: []string{context.ns.Name},
						},
						Weight: 50,
					},
				},
			},
		},
	}))
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}
	// The new pod must be scheduled on one of the nodes with the same topology
	// key-value as the attractor pod.
	for _, node := range nodesInTopology {
		if node.Name == pod.Spec.NodeName {
			t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
			return
		}
	}
	t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}
704
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
Normal file
@ -0,0 +1,704 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

// This file tests the scheduler.

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	schedulerapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
	"k8s.io/kubernetes/test/integration/framework"
)

const enableEquivalenceCache = true

type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface)

type nodeStateManager struct {
	makeSchedulable nodeMutationFunc
	makeUnSchedulable nodeMutationFunc
}

func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	return true, nil, nil
}

func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	return true, nil, nil
}

func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
	return []schedulerapi.HostPriority{}, nil
}

func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
	return []schedulerapi.HostPriority{}, nil
}

// TestSchedulerCreationFromConfigMap verifies that scheduler can be created
// from configurations provided by a ConfigMap object and then verifies that the
// configuration is applied correctly.
func TestSchedulerCreationFromConfigMap(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	ns := framework.CreateTestingNamespace("configmap", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
	defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

	// Pre-register some predicate and priority functions
	factory.RegisterFitPredicate("PredicateOne", PredicateOne)
	factory.RegisterFitPredicate("PredicateTwo", PredicateTwo)
	factory.RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
	factory.RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)

	// Add a ConfigMap object.
	configPolicyName := "scheduler-custom-policy-config"
	policyConfigMap := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
		Data: map[string]string{
			componentconfig.SchedulerPolicyConfigMapKey: `{
			"kind" : "Policy",
			"apiVersion" : "v1",
			"predicates" : [
				{"name" : "PredicateOne"},
				{"name" : "PredicateTwo"}
			],
			"priorities" : [
				{"name" : "PriorityOne", "weight" : 1},
				{"name" : "PriorityTwo", "weight" : 5}
			]
		}`,
		},
	}

	policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
	clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

	ss := &schedulerapp.SchedulerServer{
		SchedulerName: v1.DefaultSchedulerName,
		AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
			Policy: &componentconfig.SchedulerPolicySource{
				ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
					Namespace: policyConfigMap.Namespace,
					Name: policyConfigMap.Name,
				},
			},
		},
		HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
		Client: clientSet,
		InformerFactory: informerFactory,
		PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
		EventClient: clientSet.CoreV1(),
		Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
		Broadcaster: eventBroadcaster,
	}

	config, err := ss.SchedulerConfig()
	if err != nil {
		t.Fatalf("couldn't make scheduler config: %v", err)
	}

	// Verify that the config is applied correctly.
	schedPredicates := config.Algorithm.Predicates()
	schedPrioritizers := config.Algorithm.Prioritizers()
	// Includes one mandatory predicate.
	if len(schedPredicates) != 3 || len(schedPrioritizers) != 2 {
		t.Errorf("Unexpected number of predicates or priority functions. Number of predicates: %v, number of prioritizers: %v", len(schedPredicates), len(schedPrioritizers))
	}
	// Check a predicate and a priority function.
	if schedPredicates["PredicateTwo"] == nil {
		t.Errorf("Expected to have the PredicateTwo predicate.")
	}
|
||||
if schedPrioritizers[1].Function == nil || schedPrioritizers[1].Weight != 5 {
|
||||
t.Errorf("Unexpected prioritizer: func: %v, weight: %v", schedPrioritizers[1].Function, schedPrioritizers[1].Weight)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSchedulerCreationFromNonExistentConfigMap ensures that creation of the
|
||||
// scheduler from a non-existent ConfigMap fails.
|
||||
func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
|
||||
_, s, closeFn := framework.RunAMaster(nil)
|
||||
defer closeFn()
|
||||
|
||||
ns := framework.CreateTestingNamespace("configmap", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
|
||||
defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
|
||||
|
||||
ss := &schedulerapp.SchedulerServer{
|
||||
SchedulerName: v1.DefaultSchedulerName,
|
||||
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
|
||||
Policy: &componentconfig.SchedulerPolicySource{
|
||||
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
|
||||
Namespace: "non-existent-config",
|
||||
Name: "non-existent-config",
|
||||
},
|
||||
},
|
||||
},
|
||||
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
Client: clientSet,
|
||||
InformerFactory: informerFactory,
|
||||
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
|
||||
EventClient: clientSet.CoreV1(),
|
||||
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
|
||||
Broadcaster: eventBroadcaster,
|
||||
}
|
||||
|
||||
_, err := ss.SchedulerConfig()
|
||||
if err == nil {
|
||||
t.Fatalf("Creation of scheduler didn't fail while the policy ConfigMap didn't exist.")
|
||||
}
|
||||
}

func TestUnschedulableNodes(t *testing.T) {
	context := initTest(t, "unschedulable-nodes")
	defer cleanupTest(t, context)

	nodeLister := context.schedulerConfigFactory.GetNodeLister()
	// NOTE: This test cannot run in parallel, because it is creating and deleting
	// non-namespaced objects (Nodes).
	defer context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})

	goodCondition := v1.NodeCondition{
		Type:              v1.NodeReady,
		Status:            v1.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: metav1.Time{Time: time.Now()},
	}
	badCondition := v1.NodeCondition{
		Type:              v1.NodeReady,
		Status:            v1.ConditionUnknown,
		Reason:            "unschedulable condition",
		LastHeartbeatTime: metav1.Time{Time: time.Now()},
	}
	// Create a new schedulable node, since we're first going to apply
	// the unschedulable condition and verify that pods aren't scheduled.
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-scheduling-test-node"},
		Spec:       v1.NodeSpec{Unschedulable: false},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []v1.NodeCondition{goodCondition},
		},
	}
	nodeKey, err := cache.MetaNamespaceKeyFunc(node)
	if err != nil {
		t.Fatalf("Couldn't retrieve key for node %v", node.Name)
	}

	// The test does the following for each nodeStateManager in this list:
	// 1. Create a new node
	// 2. Apply the makeUnSchedulable function
	// 3. Create a new pod
	// 4. Check that the pod doesn't get assigned to the node
	// 5. Apply the schedulable function
	// 6. Check that the pod *does* get assigned to the node
	// 7. Delete the pod and node.

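	// For reference, each element of the list below is a nodeStateManager
	// (declared elsewhere in this package); a sketch of its shape, a pair of
	// mutation hooks driving the loop that follows:
	//
	//	type nodeStateManager struct {
	//		makeSchedulable   nodeMutationFunc
	//		makeUnSchedulable nodeMutationFunc
	//	}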
	nodeModifications := []nodeStateManager{
		// Test node.Spec.Unschedulable=true/false
		{
			makeUnSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
				n.Spec.Unschedulable = true
				if _, err := c.CoreV1().Nodes().Update(n); err != nil {
					t.Fatalf("Failed to update node with unschedulable=true: %v", err)
				}
				err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
					// An unschedulable node should still be present in the store.
					// Nodes that are unschedulable or that are not ready or
					// have their disk full (Node.Status.Conditions) are excluded
					// based on NodeConditionPredicate, a separate check.
					return node != nil && node.(*v1.Node).Spec.Unschedulable == true
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
				}
			},
			makeSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
				n.Spec.Unschedulable = false
				if _, err := c.CoreV1().Nodes().Update(n); err != nil {
					t.Fatalf("Failed to update node with unschedulable=false: %v", err)
				}
				err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*v1.Node).Spec.Unschedulable == false
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
				}
			},
		},
		// Test node.Status.Conditions=ConditionTrue/Unknown
		{
			makeUnSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
				n.Status = v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
					},
					Conditions: []v1.NodeCondition{badCondition},
				}
				if _, err = c.CoreV1().Nodes().UpdateStatus(n); err != nil {
					t.Fatalf("Failed to update node with bad status condition: %v", err)
				}
				err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionUnknown
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
				}
			},
			makeSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
				n.Status = v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
					},
					Conditions: []v1.NodeCondition{goodCondition},
				}
				if _, err = c.CoreV1().Nodes().UpdateStatus(n); err != nil {
					t.Fatalf("Failed to update node with healthy status condition: %v", err)
				}
				err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionTrue
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
				}
			},
		},
	}

	for i, mod := range nodeModifications {
		unSchedNode, err := context.clientSet.CoreV1().Nodes().Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// Apply the unschedulable modification to the node, and wait for the reflection
		mod.makeUnSchedulable(t, unSchedNode, nodeLister, context.clientSet)

		// Create the new pod, note that this needs to happen post unschedulable
		// modification or we have a race in the test.
		myPod, err := createPausePodWithResource(context.clientSet, "node-scheduling-test-pod", context.ns.Name, nil)
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}

		// There are no schedulable nodes - the pod shouldn't be scheduled.
		err = waitForPodToScheduleWithTimeout(context.clientSet, myPod, 2*time.Second)
		if err == nil {
			t.Errorf("Pod scheduled successfully on unschedulable nodes")
		}
		if err != wait.ErrWaitTimeout {
			t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
		} else {
			t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
		}

		// Apply the schedulable modification to the node, and wait for the reflection
		schedNode, err := context.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Failed to get node: %v", err)
		}
		mod.makeSchedulable(t, schedNode, nodeLister, context.clientSet)

		// Wait until the pod is scheduled.
		if err := waitForPodToSchedule(context.clientSet, myPod); err != nil {
			t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
		} else {
			t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
		}
		// Clean up.
		if err := deletePod(context.clientSet, myPod.Name, myPod.Namespace); err != nil {
			t.Errorf("Failed to delete pod: %v", err)
		}
		err = context.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
		if err != nil {
			t.Errorf("Failed to delete node: %v", err)
		}
	}
}

func TestMultiScheduler(t *testing.T) {
	/*
		This integration test exercises the multi-scheduler feature in the following way:
		1. create a default scheduler
		2. create a node
		3. create 3 pods: testPod, testPodFitsDefault and testPodFitsFoo
		   - note: the first two should be picked and scheduled by the default scheduler, while the last one should be
		     picked by the scheduler of name "foo-scheduler" which does not exist yet.
		4. **check point-1**:
		   - testPod and testPodFitsDefault should be scheduled
		   - testPodFitsFoo should NOT be scheduled
		5. create a scheduler with name "foo-scheduler"
		6. **check point-2**:
		   - testPodFitsFoo should be scheduled
		7. stop default scheduler
		8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
		   - note: these two pods belong to the default scheduler which no longer exists
		9. **check point-3**:
		   - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
	*/

	// 1. create and start default-scheduler
	context := initTest(t, "multi-scheduler")
	defer cleanupTest(t, context)

	// 2. create a node
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
		Spec:       v1.NodeSpec{Unschedulable: false},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
		},
	}
	context.clientSet.CoreV1().Nodes().Create(node)

	// 3. create 3 pods for testing
	t.Logf("create 3 pods for testing")
	testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil)
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	defaultScheduler := "default-scheduler"
	testPodFitsDefault, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: context.ns.Name, SchedulerName: defaultScheduler}))
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	fooScheduler := "foo-scheduler"
	testPodFitsFoo, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: context.ns.Name, SchedulerName: fooScheduler}))
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	// 4. **check point-1**:
	//    - testPod, testPodFitsDefault should be scheduled
	//    - testPodFitsFoo should NOT be scheduled
	t.Logf("wait for pods scheduled")
	if err := waitForPodToSchedule(context.clientSet, testPod); err != nil {
		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
	} else {
		t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
	}

	if err := waitForPodToSchedule(context.clientSet, testPodFitsDefault); err != nil {
		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
	} else {
		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
	}

	if err := waitForPodToScheduleWithTimeout(context.clientSet, testPodFitsFoo, time.Second*5); err == nil {
		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
	} else {
		t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
	}

	// 5. create and start a scheduler with name "foo-scheduler"
	clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
	informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
	podInformer2 := factory.NewPodInformer(context.clientSet, 0, fooScheduler)

	schedulerConfigFactory2 := factory.NewConfigFactory(
		fooScheduler,
		clientSet2,
		informerFactory2.Core().V1().Nodes(),
		podInformer2,
		informerFactory2.Core().V1().PersistentVolumes(),
		informerFactory2.Core().V1().PersistentVolumeClaims(),
		informerFactory2.Core().V1().ReplicationControllers(),
		informerFactory2.Extensions().V1beta1().ReplicaSets(),
		informerFactory2.Apps().V1beta1().StatefulSets(),
		informerFactory2.Core().V1().Services(),
		informerFactory2.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory2.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		enableEquivalenceCache,
	)
	schedulerConfig2, err := schedulerConfigFactory2.Create()
	if err != nil {
		t.Errorf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster2 := record.NewBroadcaster()
	schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: fooScheduler})
	eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet2.CoreV1().RESTClient()).Events("")})
	go podInformer2.Informer().Run(schedulerConfig2.StopEverything)
	informerFactory2.Start(schedulerConfig2.StopEverything)

	sched2, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig2}, nil...)
	sched2.Run()
	defer close(schedulerConfig2.StopEverything)

	// 6. **check point-2**:
	//    - testPodFitsFoo should be scheduled
	err = waitForPodToSchedule(context.clientSet, testPodFitsFoo)
	if err != nil {
		t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
	} else {
		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsFoo.Name)
	}

	// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
	if err := deletePod(context.clientSet, testPod.Name, context.ns.Name); err != nil {
		t.Errorf("Failed to delete pod: %v", err)
	}
	if err := deletePod(context.clientSet, testPodFitsDefault.Name, context.ns.Name); err != nil {
		t.Errorf("Failed to delete pod: %v", err)
	}

	// The rest of this test assumes that closing StopEverything will cause the
	// scheduler thread to stop immediately. It won't, and in fact it will often
	// schedule 1 more pod before finally exiting. Comment out until we fix that.
	//
	// See https://github.com/kubernetes/kubernetes/issues/23715 for more details.

	/*
		close(schedulerConfig.StopEverything)

		// 8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
		//    - note: these two pods belong to the default scheduler which no longer exists
		podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil)
		podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault)
		testPodNoAnnotation2, err := clientSet.CoreV1().Pods(ns.Name).Create(podWithNoAnnotation2)
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}
		testPodWithAnnotationFitsDefault2, err := clientSet.CoreV1().Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}

		// 9. **check point-3**:
		//    - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
		err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
		if err == nil {
			t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
		} else {
			t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodNoAnnotation2.Name)
		}
		err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
		if err == nil {
			t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsDefault2.Name, err)
		} else {
			t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodWithAnnotationFitsDefault2.Name)
		}
	*/
}
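
// examplePodForScheduler is an illustrative sketch (not used by the tests
// above): the only hook a pod needs to opt into a custom scheduler is
// Spec.SchedulerName. Pods naming a scheduler that is not running simply stay
// Pending, which is exactly what check point-1 relies on.
func examplePodForScheduler(ns, schedulerName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod", Namespace: ns},
		Spec: v1.PodSpec{
			SchedulerName: schedulerName, // e.g. "foo-scheduler"
			Containers:    []v1.Container{{Name: "pause", Image: "busybox"}},
		},
	}
}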

// This test verifies that the scheduler works correctly regardless of whether
// the kubelet is allocatable-aware or not.
func TestAllocatable(t *testing.T) {
	// 1. create and start the scheduler
	context := initTest(t, "allocatable")
	defer cleanupTest(t, context)

	// 2. create a node without allocatable awareness
	nodeRes := &v1.ResourceList{
		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
		v1.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
	}
	allocNode, err := createNode(context.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
	if err != nil {
		t.Fatalf("Failed to create node: %v", err)
	}

	// 3. create a resource pod which requires less than Capacity
	podName := "pod-test-allocatable"
	podRes := &v1.ResourceList{
		v1.ResourceCPU:    *resource.NewMilliQuantity(20, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
	}
	testAllocPod, err := createPausePodWithResource(context.clientSet, podName, context.ns.Name, podRes)
	if err != nil {
		t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
	}

	// 4. Test: this test pod should be scheduled since the api-server will use Capacity as Allocatable
	err = waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod, time.Second*5)
	if err != nil {
		t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
	} else {
		t.Logf("Test allocatable unawareness: %s Pod scheduled", testAllocPod.Name)
	}

	// 5. Change the node status to allocatable-aware; note that Allocatable is less than the Pod's requirement
	allocNode.Status = v1.NodeStatus{
		Capacity: v1.ResourceList{
			v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
			v1.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
		},
		Allocatable: v1.ResourceList{
			v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
			v1.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
		},
	}

	if _, err := context.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
	}

	if err := deletePod(context.clientSet, testAllocPod.Name, context.ns.Name); err != nil {
		t.Fatalf("Failed to remove the first pod: %v", err)
	}

	// 6. Make another pod with a different name but the same resource request
	podName2 := "pod-test-allocatable2"
	testAllocPod2, err := createPausePodWithResource(context.clientSet, podName2, context.ns.Name, podRes)
	if err != nil {
		t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
	}

	// 7. Test: this test pod should not be scheduled since it requests more than Allocatable
	if err := waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod2, time.Second*5); err == nil {
		t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
	} else {
		t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
	}
}
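
// The arithmetic behind TestAllocatable, for quick reference: the node reports
// a Capacity of 30m CPU / 30 bytes memory, and later an Allocatable of 10m / 10.
// The pod requests 20m / 20, so 20 <= 30 lets the first pod schedule against
// Capacity (step 4), while 20 > 10 leaves the second pod Pending once
// Allocatable is set (step 7).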

// TestPDBCache verifies that the scheduler cache works as expected when handling
// PodDisruptionBudget.
func TestPDBCache(t *testing.T) {
	context := initTest(t, "pdbcache")
	defer cleanupTest(t, context)

	intstrMin := intstr.FromInt(4)
	pdb := &policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: context.ns.Name,
			Name:      "test-pdb",
			Labels:    map[string]string{"tkey1": "tval1", "tkey2": "tval2"},
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: &intstrMin,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"tkey": "tvalue"}},
		},
	}

	createdPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
	if err != nil {
		t.Errorf("Failed to create PDB: %v", err)
	}
	// Wait for PDB to show up in the scheduler's cache.
	if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
		cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
		if err != nil {
			t.Errorf("Error while polling for PDB: %v", err)
			return false, err
		}
		return len(cachedPDBs) > 0, err
	}); err != nil {
		t.Fatalf("No PDB was added to the cache: %v", err)
	}
	// Read PDB from the cache and compare it.
	cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
	if err != nil {
		t.Fatalf("Failed to list PDBs from the cache: %v", err)
	}
	if len(cachedPDBs) != 1 {
		t.Fatalf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs))
	}
	if !reflect.DeepEqual(createdPDB, cachedPDBs[0]) {
		t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(createdPDB, cachedPDBs[0]))
	}

	// Update PDB and change its labels.
	pdbCopy := *cachedPDBs[0]
	pdbCopy.Labels = map[string]string{}
	updatedPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Update(&pdbCopy)
	if err != nil {
		t.Errorf("Failed to update PDB: %v", err)
	}
	// Wait for PDB to be updated in the scheduler's cache.
	if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
		cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
		if err != nil {
			t.Errorf("Error while polling for PDB: %v", err)
			return false, err
		}
		return len(cachedPDBs[0].Labels) == 0, err
	}); err != nil {
		t.Fatalf("No PDB was updated in the cache: %v", err)
	}
	// Read PDB from the cache and compare it.
	cachedPDBs, err = context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
	if err != nil {
		t.Fatalf("Failed to list PDBs from the cache: %v", err)
	}
	if len(cachedPDBs) != 1 {
		t.Errorf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs))
	}
	if !reflect.DeepEqual(updatedPDB, cachedPDBs[0]) {
		t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(updatedPDB, cachedPDBs[0]))
	}

	// Delete PDB.
	err = context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Delete(pdb.Name, &metav1.DeleteOptions{})
	if err != nil {
		t.Errorf("Failed to delete PDB: %v", err)
	}
	// Wait for PDB to be deleted from the scheduler's cache.
	if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
		cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
		if err != nil {
			t.Errorf("Error while polling for PDB: %v", err)
			return false, err
		}
		return len(cachedPDBs) == 0, err
	}); err != nil {
		t.Errorf("No PDB was deleted from the cache: %v", err)
	}
}
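
// A note on the MinAvailable field used above: it is an intstr.IntOrString,
// so a PDB can also express a percentage; a sketch of the two variants:
//
//	four := intstr.FromInt(4)        // at least 4 pods must stay available
//	half := intstr.FromString("50%") // or at least half of the matched pods
//	spec := policy.PodDisruptionBudgetSpec{MinAvailable: &four}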
303
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

// This file tests the Taint feature.

import (
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	"k8s.io/kubernetes/pkg/controller/node"
	"k8s.io/kubernetes/pkg/controller/node/ipam"
	kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
	"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
	pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
)

// TestTaintNodeByCondition verifies:
//  1. MemoryPressure Toleration is added to a non-BestEffort Pod by PodTolerationRestriction
//  2. NodeController taints nodes by node condition
//  3. Scheduler allows a pod to tolerate node condition taints, e.g. network unavailable
func TestTaintNodeByCondition(t *testing.T) {
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		<-h.Initialized
		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
	}))

	// Enable TaintNodeByCondition
	utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")

	// Build clientsets and informers for the controllers.
	internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
	internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)

	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
	informers := informers.NewSharedInformerFactory(clientset, time.Second)

	// Build PodToleration Admission.
	admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})
	kubeadmission.WantsInternalKubeClientSet(admission).SetInternalKubeClientSet(internalClientset)
	kubeadmission.WantsInternalKubeInformerFactory(admission).SetInternalKubeInformerFactory(internalInformers)

	// Start master with admission.
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.AdmissionControl = admission
	_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
	defer closeFn()

	nsName := "default"
	controllerCh := make(chan struct{})
	defer close(controllerCh)

	// Start NodeController for taint.
	nc, err := node.NewNodeController(
		informers.Core().V1().Pods(),
		informers.Core().V1().Nodes(),
		informers.Extensions().V1beta1().DaemonSets(),
		nil, // CloudProvider
		clientset,
		time.Second,             // Pod eviction timeout
		100,                     // Eviction limiter QPS
		100,                     // Secondary eviction limiter QPS
		100,                     // Large cluster threshold
		100,                     // Unhealthy zone threshold
		time.Second,             // Node monitor grace period
		time.Second,             // Node startup grace period
		time.Second,             // Node monitor period
		nil,                     // Cluster CIDR
		nil,                     // Service CIDR
		0,                       // Node CIDR mask size
		false,                   // Allocate node CIDRs
		ipam.RangeAllocatorType, // Allocator type
		true,                    // Run taint manager
		true,                    // Enable taint-based eviction
		true,                    // Enable the TaintNodeByCondition feature
	)
	if err != nil {
		t.Errorf("Failed to create node controller: %v", err)
		return
	}
	go nc.Run(controllerCh)

	// Apply feature gates to enable TaintNodesByCondition
	algorithmprovider.ApplyFeatureGates()

	// Start scheduler
	configurator := factory.NewConfigFactory(
		v1.DefaultSchedulerName,
		clientset,
		informers.Core().V1().Nodes(),
		informers.Core().V1().Pods(),
		informers.Core().V1().PersistentVolumes(),
		informers.Core().V1().PersistentVolumeClaims(),
		informers.Core().V1().ReplicationControllers(),
		informers.Extensions().V1beta1().ReplicaSets(),
		informers.Apps().V1beta1().StatefulSets(),
		informers.Core().V1().Services(),
		informers.Policy().V1beta1().PodDisruptionBudgets(),
		informers.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		true, // Enable EqualCache by default.
	)

	sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
		cfg.StopEverything = controllerCh
		cfg.Recorder = &record.FakeRecorder{}
	})
	if err != nil {
		t.Errorf("Failed to create scheduler: %v.", err)
		return
	}
	go sched.Run()

	// Wait for all controllers to sync.
	informers.Start(controllerCh)
	internalInformers.Start(controllerCh)

	informers.WaitForCacheSync(controllerCh)
	internalInformers.WaitForCacheSync(controllerCh)

	// -------------------------------------------
	// Test TaintNodeByCondition feature.
	// -------------------------------------------
	memoryPressureToleration := v1.Toleration{
		Key:      algorithm.TaintNodeMemoryPressure,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	// Case 1: Add MemoryPressure Toleration for a non-BestEffort pod.
	burstablePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "burstable-pod",
			Namespace: nsName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: "busybox",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceCPU: resource.MustParse("100m"),
						},
					},
				},
			},
		},
	}

	burstablePodInServ, err := clientset.CoreV1().Pods(nsName).Create(burstablePod)
	if err != nil {
		t.Errorf("Case 1: Failed to create pod: %v", err)
	} else if !reflect.DeepEqual(burstablePodInServ.Spec.Tolerations, []v1.Toleration{memoryPressureToleration}) {
		t.Errorf("Case 1: Unexpected toleration of non-BestEffort pod, expected: %+v, got: %v",
			[]v1.Toleration{memoryPressureToleration},
			burstablePodInServ.Spec.Tolerations)
	}

	// Case 2: No MemoryPressure Toleration for a BestEffort pod.
	besteffortPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "best-effort-pod",
			Namespace: nsName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: "busybox",
				},
			},
		},
	}

	besteffortPodInServ, err := clientset.CoreV1().Pods(nsName).Create(besteffortPod)
	if err != nil {
		t.Errorf("Case 2: Failed to create pod: %v", err)
	} else if len(besteffortPodInServ.Spec.Tolerations) != 0 {
		t.Errorf("Case 2: Unexpected toleration # of BestEffort pod, expected: 0, got: %v",
			len(besteffortPodInServ.Spec.Tolerations))
	}

	// Case 3: Taint Node by NetworkUnavailable condition.
	networkUnavailableNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "node-1",
		},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("4000m"),
				v1.ResourceMemory: resource.MustParse("16Gi"),
				v1.ResourcePods:   resource.MustParse("110"),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("4000m"),
				v1.ResourceMemory: resource.MustParse("16Gi"),
				v1.ResourcePods:   resource.MustParse("110"),
			},
			Conditions: []v1.NodeCondition{
				{
					Type:   v1.NodeNetworkUnavailable,
					Status: v1.ConditionTrue,
				},
				{
					Type:   v1.NodeReady,
					Status: v1.ConditionFalse,
				},
			},
		},
	}

	nodeInformerCh := make(chan bool)
	nodeInformer := informers.Core().V1().Nodes().Informer()
	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(old, cur interface{}) {
			curNode := cur.(*v1.Node)
			for _, taint := range curNode.Spec.Taints {
				if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
					taint.Effect == v1.TaintEffectNoSchedule {
					nodeInformerCh <- true
					break
				}
			}
		},
	})

	if _, err := clientset.CoreV1().Nodes().Create(networkUnavailableNode); err != nil {
		t.Errorf("Case 3: Failed to create node: %v", err)
	} else {
		select {
		case <-time.After(60 * time.Second):
			t.Errorf("Case 3: Failed to taint node after 60s.")
		case <-nodeInformerCh:
		}
	}

	// Case 4: Schedule a Pod with a NetworkUnavailable toleration.
	networkDaemonPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "network-daemon-pod",
			Namespace: nsName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: "busybox",
				},
			},
			Tolerations: []v1.Toleration{
				{
					Key:      algorithm.TaintNodeNetworkUnavailable,
					Operator: v1.TolerationOpExists,
					Effect:   v1.TaintEffectNoSchedule,
				},
			},
		},
	}

	if _, err := clientset.CoreV1().Pods(nsName).Create(networkDaemonPod); err != nil {
		t.Errorf("Case 4: Failed to create pod for network daemon: %v", err)
	} else {
		if err := waitForPodToScheduleWithTimeout(clientset, networkDaemonPod, time.Second*60); err != nil {
			t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
		}
	}
}
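
// For reference, the taint that Case 3 waits for and Case 4 tolerates looks
// roughly like this (a sketch; the exact key string comes from the algorithm
// package of this snapshot, e.g. "node.kubernetes.io/network-unavailable"):
//
//	taint := v1.Taint{
//		Key:    algorithm.TaintNodeNetworkUnavailable,
//		Effect: v1.TaintEffectNoSchedule,
//	}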
395
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
Normal file
@ -0,0 +1,395 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"fmt"
	"net/http/httptest"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
)

type TestContext struct {
	closeFn                framework.CloseFunc
	httpServer             *httptest.Server
	ns                     *v1.Namespace
	clientSet              *clientset.Clientset
	informerFactory        informers.SharedInformerFactory
	schedulerConfigFactory scheduler.Configurator
	schedulerConfig        *scheduler.Config
	scheduler              *scheduler.Scheduler
}

// initTest initializes a test environment and creates a scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
	var context TestContext
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, context.httpServer, context.closeFn = framework.RunAMaster(masterConfig)

	context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), context.httpServer, t)

	context.clientSet = clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL})
	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, 0)
	podInformer := factory.NewPodInformer(context.clientSet, 12*time.Hour, v1.DefaultSchedulerName)
	context.schedulerConfigFactory = factory.NewConfigFactory(
		v1.DefaultSchedulerName,
		context.clientSet,
		context.informerFactory.Core().V1().Nodes(),
		podInformer,
		context.informerFactory.Core().V1().PersistentVolumes(),
		context.informerFactory.Core().V1().PersistentVolumeClaims(),
		context.informerFactory.Core().V1().ReplicationControllers(),
		context.informerFactory.Extensions().V1beta1().ReplicaSets(),
		context.informerFactory.Apps().V1beta1().StatefulSets(),
		context.informerFactory.Core().V1().Services(),
		context.informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		context.informerFactory.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		true,
	)
	var err error
	context.schedulerConfig, err = context.schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(context.clientSet.CoreV1().RESTClient()).Events("")})
	go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
	context.informerFactory.Start(context.schedulerConfig.StopEverything)
	context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)
	if err != nil {
		t.Fatalf("Couldn't create scheduler: %v", err)
	}
	context.scheduler.Run()
	return &context
}

// cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test.
func cleanupTest(t *testing.T, context *TestContext) {
	// Kill the scheduler.
	close(context.schedulerConfig.StopEverything)
	// Cleanup nodes.
	context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
	framework.DeleteTestingNamespace(context.ns, context.httpServer, t)
	context.closeFn()
}
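
// Typical usage of the pair above, as seen throughout this package (sketch):
//
//	func TestSomething(t *testing.T) {
//		context := initTest(t, "something")
//		defer cleanupTest(t, context)
//		// ... drive nodes and pods with the helpers below ...
//	}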

// waitForReflection waits till the passFunc confirms that the object it expects
// to see is in the store. Used to observe reflected events.
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
	nodes := []*v1.Node{}
	err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
		n, err := nodeLister.Get(key)

		switch {
		case err == nil && passFunc(n):
			return true, nil
		case errors.IsNotFound(err):
			nodes = append(nodes, nil)
		case err != nil:
			t.Errorf("Unexpected error: %v", err)
		default:
			nodes = append(nodes, n)
		}

		return false, nil
	})
	if err != nil {
		t.Logf("Logging consecutive node versions received from store:")
		for i, n := range nodes {
			t.Logf("%d: %#v", i, n)
		}
	}
	return err
}
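
// Illustrative usage of waitForReflection, taken from the pattern in
// TestUnschedulableNodes above: callers pass a closure over the expected
// state, e.g. waiting until an update to Spec.Unschedulable is visible:
//
//	err := waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
//		return node != nil && node.(*v1.Node).Spec.Unschedulable
//	})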

// nodeHasLabels returns a function that checks if a node has all the given labels.
func nodeHasLabels(cs clientset.Interface, nodeName string, labels map[string]string) wait.ConditionFunc {
	return func() (bool, error) {
		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		for k, v := range labels {
			if node.Labels == nil || node.Labels[k] != v {
				return false, nil
			}
		}
		return true, nil
	}
}

// waitForNodeLabels waits for the given node to have all the given labels.
func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[string]string) error {
	return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}

// createNode creates a node with the given resource list and returns a pointer
// and error status. If 'res' is nil, a predefined amount of resources will be
// used.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
	// If the resource list is nil, we use a default amount of resources for the node.
	if res == nil {
		res = &v1.ResourceList{
			v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
		}
	}
	n := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.NodeSpec{Unschedulable: false},
		Status: v1.NodeStatus{
			Capacity: *res,
		},
	}
	return cs.CoreV1().Nodes().Create(n)
}

// createNodes creates `numNodes` nodes. The created node names will be in the
// form of "`prefix`-X" where X is an ordinal.
func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
	nodes := make([]*v1.Node, numNodes)
	for i := 0; i < numNodes; i++ {
		nodeName := fmt.Sprintf("%v-%d", prefix, i)
		node, err := createNode(cs, nodeName, res)
		if err != nil {
			return nodes[:], err
		}
		nodes[i] = node
	}
	return nodes[:], nil
}

type pausePodConfig struct {
	Name                              string
	Namespace                         string
	Affinity                          *v1.Affinity
	Annotations, Labels, NodeSelector map[string]string
	Resources                         *v1.ResourceRequirements
	Tolerations                       []v1.Toleration
	NodeName                          string
	SchedulerName                     string
	Priority                          *int32
}

// initPausePod initializes a pod API object from the given config. It is used
// mainly in the pod creation process.
func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        conf.Name,
			Namespace:   conf.Namespace,
			Labels:      conf.Labels,
			Annotations: conf.Annotations,
		},
		Spec: v1.PodSpec{
			NodeSelector: conf.NodeSelector,
			Affinity:     conf.Affinity,
			Containers: []v1.Container{
				{
					Name:  conf.Name,
					Image: framework.GetPauseImageName(cs),
				},
			},
			Tolerations:   conf.Tolerations,
			NodeName:      conf.NodeName,
			SchedulerName: conf.SchedulerName,
			Priority:      conf.Priority,
		},
	}
	if conf.Resources != nil {
		pod.Spec.Containers[0].Resources = *conf.Resources
	}
	return pod
}

// createPausePod creates a pod with the "pause" image and the given config and
// returns its pointer and error status.
func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) {
	return cs.CoreV1().Pods(p.Namespace).Create(p)
}

// createPausePodWithResource creates a pod with the "pause" image and the given
// resources and returns its pointer and error status. The resource list can be
// nil.
func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
	var conf pausePodConfig
	if res == nil {
		conf = pausePodConfig{
			Name:      podName,
			Namespace: nsName,
		}
	} else {
		conf = pausePodConfig{
			Name:      podName,
			Namespace: nsName,
			Resources: &v1.ResourceRequirements{
				Requests: *res,
			},
		}
	}
	return createPausePod(cs, initPausePod(cs, &conf))
}

// runPausePod creates a pod with the "pause" image and the given config and
// waits until it is scheduled. It returns its pointer and error status.
func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
	pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
	if err != nil {
		return nil, fmt.Errorf("Error creating pause pod: %v", err)
	}
	if err = waitForPodToSchedule(cs, pod); err != nil {
		return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
	}
	if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
		return pod, fmt.Errorf("Error getting pod %v info: %v", pod.Name, err)
	}
	return pod, nil
}

// podDeleted returns true if a pod is not found in the given namespace.
func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			return true, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		if pod.DeletionTimestamp != nil {
			return true, nil
		}
		return false, nil
	}
}

// podIsGettingEvicted returns true if the pod's deletion timestamp is set.
func podIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pod.DeletionTimestamp != nil {
			return true, nil
		}
		return false, nil
	}
}

// podScheduled returns true if a node is assigned to the given pod.
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		if pod.Spec.NodeName == "" {
			return false, nil
		}
		return true, nil
	}
}

// podUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status.
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
		return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable, nil
	}
}
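
// podUnschedulable has no wait* wrapper in this file; a sketch of how a caller
// would poll it, mirroring waitForPodToScheduleWithTimeout below:
//
//	err := wait.Poll(100*time.Millisecond, 30*time.Second,
//		podUnschedulable(cs, pod.Namespace, pod.Name))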

// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
// an error if it does not get scheduled within the given timeout.
func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, podScheduled(cs, pod.Namespace, pod.Name))
}

// waitForPodToSchedule waits for a pod to get scheduled and returns an error if
// it does not get scheduled within the timeout duration (30 seconds).
func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
	return waitForPodToScheduleWithTimeout(cs, pod, 30*time.Second)
}

// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
	return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
}

// cleanupPods deletes the given pods and waits for them to be actually deleted.
func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
	for _, p := range pods {
		err := cs.CoreV1().Pods(p.Namespace).Delete(p.Name, metav1.NewDeleteOptions(0))
		if err != nil && !errors.IsNotFound(err) {
			t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
		}
	}
	for _, p := range pods {
		if err := wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, p.Namespace, p.Name)); err != nil {
			t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
		}
	}
}

// printAllPods prints a list of all the pods and their node names. This is used
// for debugging.
func printAllPods(t *testing.T, cs clientset.Interface, nsName string) {
	podList, err := cs.CoreV1().Pods(nsName).List(metav1.ListOptions{})
	if err != nil {
		t.Logf("Error getting pods: %v", err)
		return
	}
	for _, pod := range podList.Items {
		t.Logf("Pod:\n\tName:%v\n\tNamespace:%v\n\tNode Name:%v\n", pod.Name, pod.Namespace, pod.Spec.NodeName)
	}
}
494
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
Normal file
@ -0,0 +1,494 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

// This file tests the VolumeScheduling feature.

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
)

type testConfig struct {
	client   clientset.Interface
	ns       string
	stop     <-chan struct{}
	teardown func()
}

var (
	// Delete API objects immediately
	deletePeriod = int64(0)
	deleteOption = &metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod}

	modeWait      = storagev1.VolumeBindingWaitForFirstConsumer
	modeImmediate = storagev1.VolumeBindingImmediate

	classWait      = "wait"
	classImmediate = "immediate"
)

const (
	labelKey   = "test-label"
	labelValue = "test-value"
	nodeName   = "node1"
	podLimit   = 100
	volsPerPod = 5
)
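
// The two binding modes referenced above are carried by a StorageClass; a
// "wait" class defers PVC binding until a pod using the claim is scheduled,
// while an "immediate" class binds as soon as a matching PV exists. A sketch
// of how such a class might be built (the Provisioner value is an assumption,
// typical for pre-provisioned local volumes):
//
//	class := storagev1.StorageClass{
//		ObjectMeta:        metav1.ObjectMeta{Name: classWait},
//		Provisioner:       "kubernetes.io/no-provisioner",
//		VolumeBindingMode: &modeWait,
//	}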
|
||||
|
||||
func TestVolumeBinding(t *testing.T) {
|
||||
config := setup(t, "volume-scheduling")
|
||||
defer config.teardown()
|
||||
|
||||
cases := map[string]struct {
|
||||
pod *v1.Pod
|
||||
pvs []*v1.PersistentVolume
|
||||
pvcs []*v1.PersistentVolumeClaim
|
||||
}{
|
||||
"immediate can bind": {
|
||||
pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}),
|
||||
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "")},
|
||||
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")},
|
||||
},
|
||||
"immediate pvc prebound": {
|
||||
pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}),
|
||||
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "")},
|
||||
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-prebound", config.ns, &classImmediate, "pv-i-pvc-prebound")},
|
||||
},
|
||||
"immediate pv prebound": {
|
||||
pod: makePod("pod-i-pv-prebound", config.ns, []string{"pvc-i-pv-prebound"}),
|
||||
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns)},
|
||||
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-pv-prebound", config.ns, &classImmediate, "")},
|
||||
},
|
||||
"wait can bind": {
|
||||
pod: makePod("pod-w-canbind", config.ns, []string{"pvc-w-canbind"}),
|
||||
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "")},
|
||||
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")},
|
||||
},
|
||||
"wait pvc prebound": {
|
||||
pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}),
|
||||
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "")},
|
||||
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-prebound", config.ns, &classWait, "pv-w-pvc-prebound")},
|
||||
},
|
||||
"wait pv prebound": {
|
||||
pod: makePod("pod-w-pv-prebound", config.ns, []string{"pvc-w-pv-prebound"}),
|
||||
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns)},
|
||||
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-pv-prebound", config.ns, &classWait, "")},
|
||||
},
|
||||
"wait can bind two": {
|
||||
pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}),
|
||||
pvs: []*v1.PersistentVolume{
|
||||
makePV(t, "pv-w-canbind-2", classWait, "", ""),
|
||||
makePV(t, "pv-w-canbind-3", classWait, "", ""),
|
||||
},
|
||||
pvcs: []*v1.PersistentVolumeClaim{
|
||||
makePVC("pvc-w-canbind-2", config.ns, &classWait, ""),
|
||||
makePVC("pvc-w-canbind-3", config.ns, &classWait, ""),
|
||||
},
|
||||
},
|
||||
"mix immediate and wait": {
|
||||
pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}),
|
||||
pvs: []*v1.PersistentVolume{
|
||||
makePV(t, "pv-w-canbind-4", classWait, "", ""),
|
||||
makePV(t, "pv-i-canbind-2", classImmediate, "", ""),
|
||||
},
|
||||
pvcs: []*v1.PersistentVolumeClaim{
|
||||
makePVC("pvc-w-canbind-4", config.ns, &classWait, ""),
|
||||
makePVC("pvc-i-canbind-2", config.ns, &classImmediate, ""),
|
||||
},
|
||||
},
|
||||
// TODO:
|
||||
// immediate mode - PVC cannot bound
|
||||
// wait mode - PVC cannot bind
|
||||
// wait mode - 2 PVCs, 1 cannot bind
|
||||
}
|
||||
|
||||
    for name, test := range cases {
        glog.Infof("Running test %v", name)

        // Create PVs
        for _, pv := range test.pvs {
            if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
                t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
            }
        }

        // Create PVCs
        for _, pvc := range test.pvcs {
            if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
                t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
            }
        }

        // Create Pod
        if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil {
            t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err)
        }
        if err := waitForPodToSchedule(config.client, test.pod); err != nil {
            t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
        }

        // Validate PVC/PV binding
        for _, pvc := range test.pvcs {
            validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
        }
        for _, pv := range test.pvs {
            validatePVPhase(t, config.client, pv, v1.VolumeBound)
        }

        // TODO: validate events on Pods and PVCs

        config.client.CoreV1().Pods(config.ns).DeleteCollection(deleteOption, metav1.ListOptions{})
        config.client.CoreV1().PersistentVolumeClaims(config.ns).DeleteCollection(deleteOption, metav1.ListOptions{})
        config.client.CoreV1().PersistentVolumes().DeleteCollection(deleteOption, metav1.ListOptions{})
    }
}

// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
func TestVolumeBindingStress(t *testing.T) {
    config := setup(t, "volume-binding-stress")
    defer config.teardown()

    // Create enough PVs and PVCs for all the pods
    pvs := []*v1.PersistentVolume{}
    pvcs := []*v1.PersistentVolumeClaim{}
    for i := 0; i < podLimit*volsPerPod; i++ {
        pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "")
        pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

        // Use the blank identifier so pv/pvc are not shadowed by the
        // (possibly nil) objects returned from a failed Create call.
        if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        }
        if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
            t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
        }

        pvs = append(pvs, pv)
        pvcs = append(pvcs, pvc)
    }

    pods := []*v1.Pod{}
    for i := 0; i < podLimit; i++ {
        // Generate the list of PVC names for the pod
        podPvcs := []string{}
        for j := i * volsPerPod; j < (i+1)*volsPerPod; j++ {
            podPvcs = append(podPvcs, pvcs[j].Name)
        }

        pod := makePod(fmt.Sprintf("pod%v", i), config.ns, podPvcs)
        if _, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
            t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
        }
        pods = append(pods, pod)
    }

    // Validate Pods scheduled
    for _, pod := range pods {
        if err := waitForPodToSchedule(config.client, pod); err != nil {
            t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
        }
    }

    // Validate PVC/PV binding
    for _, pvc := range pvcs {
        validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
    }
    for _, pv := range pvs {
        validatePVPhase(t, config.client, pv, v1.VolumeBound)
    }

    // TODO: validate events on Pods and PVCs
}

func setup(t *testing.T, nsName string) *testConfig {
    h := &framework.MasterHolder{Initialized: make(chan struct{})}
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        // Hold incoming requests until the master has finished initializing.
        <-h.Initialized
        h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
    }))

    // Enable feature gates
    utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true")

    // Build clientset and informers for controllers.
    clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    informers := informers.NewSharedInformerFactory(clientset, time.Second)

    // Start master
    masterConfig := framework.NewIntegrationTestMasterConfig()
    _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
    ns := framework.CreateTestingNamespace(nsName, s, t).Name

    controllerCh := make(chan struct{})

    // Start PV controller for volume binding.
    params := persistentvolume.ControllerParameters{
        KubeClient:                clientset,
        SyncPeriod:                time.Hour, // test shouldn't need to resync
        VolumePlugins:             nil,       // TODO: needed later for dynamic provisioning
        Cloud:                     nil,
        ClusterName:               "volume-test-cluster",
        VolumeInformer:            informers.Core().V1().PersistentVolumes(),
        ClaimInformer:             informers.Core().V1().PersistentVolumeClaims(),
        ClassInformer:             informers.Storage().V1().StorageClasses(),
        EventRecorder:             nil, // TODO: add one so we can test PV events
        EnableDynamicProvisioning: true,
    }
    ctrl, err := persistentvolume.NewController(params)
    if err != nil {
        t.Fatalf("Failed to create PV controller: %v", err)
    }
    go ctrl.Run(controllerCh)

    // Start scheduler
    configurator := factory.NewConfigFactory(
        v1.DefaultSchedulerName,
        clientset,
        informers.Core().V1().Nodes(),
        informers.Core().V1().Pods(),
        informers.Core().V1().PersistentVolumes(),
        informers.Core().V1().PersistentVolumeClaims(),
        informers.Core().V1().ReplicationControllers(),
        informers.Extensions().V1beta1().ReplicaSets(),
        informers.Apps().V1beta1().StatefulSets(),
        informers.Core().V1().Services(),
        informers.Policy().V1beta1().PodDisruptionBudgets(),
        informers.Storage().V1().StorageClasses(),
        v1.DefaultHardPodAffinitySymmetricWeight,
        true, // Enable EqualCache by default.
    )

    sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
        cfg.StopEverything = controllerCh
        cfg.Recorder = &record.FakeRecorder{}
    })
    if err != nil {
        t.Fatalf("Failed to create scheduler: %v.", err)
    }
    go sched.Run()

    // Wait for all controllers to sync.
    informers.Start(controllerCh)
    informers.WaitForCacheSync(controllerCh)

    // Create shared objects
    // Create node
    testNode := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{
            Name:   nodeName,
            Labels: map[string]string{labelKey: labelValue},
        },
        Spec: v1.NodeSpec{Unschedulable: false},
        Status: v1.NodeStatus{
            Capacity: v1.ResourceList{
                v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI),
            },
            Conditions: []v1.NodeCondition{
                {
                    Type:              v1.NodeReady,
                    Status:            v1.ConditionTrue,
                    Reason:            "schedulable condition",
                    LastHeartbeatTime: metav1.Time{Time: time.Now()},
                },
            },
        },
    }
    if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil {
        t.Fatalf("Failed to create Node %q: %v", testNode.Name, err)
    }

    // Create SCs
    scs := []*storagev1.StorageClass{
        makeStorageClass(classWait, &modeWait),
        makeStorageClass(classImmediate, &modeImmediate),
    }
    for _, sc := range scs {
        if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil {
            t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
        }
    }

    return &testConfig{
        client: clientset,
        ns:     ns,
        stop:   controllerCh,
        teardown: func() {
            clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{})
            clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{})
            clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
            clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{})
            clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
            close(controllerCh)
            closeFn()
            // Disable the same gates that were enabled in setup.
            utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,PersistentLocalVolumes=false")
        },
    }
}

func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
    return &storagev1.StorageClass{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Provisioner:       "kubernetes.io/no-provisioner",
        VolumeBindingMode: mode,
    }
}

func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name:        name,
            Annotations: map[string]string{},
        },
        Spec: v1.PersistentVolumeSpec{
            Capacity: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): resource.MustParse("5Gi"),
            },
            AccessModes: []v1.PersistentVolumeAccessMode{
                v1.ReadWriteOnce,
            },
            StorageClassName: scName,
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Local: &v1.LocalVolumeSource{
                    Path: "/test-path",
                },
            },
        },
    }

    if pvcName != "" {
        pv.Spec.ClaimRef = &v1.ObjectReference{Name: pvcName, Namespace: ns}
    }

    testNodeAffinity := &v1.NodeAffinity{
        RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
            NodeSelectorTerms: []v1.NodeSelectorTerm{
                {
                    MatchExpressions: []v1.NodeSelectorRequirement{
                        {
                            Key:      labelKey,
                            Operator: v1.NodeSelectorOpIn,
                            Values:   []string{labelValue},
                        },
                    },
                },
            },
        },
    }
    err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, testNodeAffinity)
    if err != nil {
        t.Fatalf("Setting storage node affinity failed: %v", err)
    }
    return pv
}

func makePVC(name, ns string, scName *string, volumeName string) *v1.PersistentVolumeClaim {
    return &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: ns,
        },
        Spec: v1.PersistentVolumeClaimSpec{
            AccessModes: []v1.PersistentVolumeAccessMode{
                v1.ReadWriteOnce,
            },
            Resources: v1.ResourceRequirements{
                Requests: v1.ResourceList{
                    v1.ResourceName(v1.ResourceStorage): resource.MustParse("5Gi"),
                },
            },
            StorageClassName: scName,
            VolumeName:       volumeName,
        },
    }
}

func makePod(name, ns string, pvcs []string) *v1.Pod {
    volumes := []v1.Volume{}
    for i, pvc := range pvcs {
        volumes = append(volumes, v1.Volume{
            Name: fmt.Sprintf("vol%v", i),
            VolumeSource: v1.VolumeSource{
                PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                    ClaimName: pvc,
                },
            },
        })
    }

    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: ns,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    "write-pod",
                    Image:   "gcr.io/google_containers/busybox:1.24",
                    Command: []string{"/bin/sh"},
                    Args:    []string{"-c", "while true; do sleep 1; done"},
                },
            },
            Volumes: volumes,
        },
    }
}

func validatePVCPhase(t *testing.T, client clientset.Interface, pvc *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase) {
    claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
    if err != nil {
        t.Errorf("Failed to get PVC %v/%v: %v", pvc.Namespace, pvc.Name, err)
        return
    }

    if claim.Status.Phase != phase {
        t.Errorf("PVC %v/%v phase not %v, got %v", pvc.Namespace, pvc.Name, phase, claim.Status.Phase)
    }
}

func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.PersistentVolume, phase v1.PersistentVolumePhase) {
    pv, err := client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
    if err != nil {
        t.Errorf("Failed to get PV %v: %v", pv.Name, err)
        return
    }

    if pv.Status.Phase != phase {
        t.Errorf("PV %v phase not %v, got %v", pv.Name, phase, pv.Status.Phase)
    }
}
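The `waitForPodToSchedule` helper called throughout these tests comes from the shared scheduler integration-test utilities, which are not part of this diff. A minimal sketch of what such a helper could look like, assuming the client-go API vendored here (the name, interval, and timeout are illustrative, not the actual implementation; imports of `wait`, `metav1`, and `time` are elided):

```go
// Illustrative only: poll until the scheduler has assigned the pod a node.
func waitForPodToScheduleSketch(client clientset.Interface, pod *v1.Pod) error {
    return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
        p, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        // A non-empty NodeName means the scheduler has bound the pod.
        return p.Spec.NodeName != "", nil
    })
}
```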
67
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
Normal file
@ -0,0 +1,67 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "scheduler_perf_types.go",
        "util.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",
        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "scheduler_bench_test.go",
        "scheduler_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
    library = ":go_default_library",
    tags = ["integration"],
    deps = [
        "//plugin/pkg/scheduler:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
16
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/OWNERS
generated
vendored
Normal file
@ -0,0 +1,16 @@
approvers:
- bsalamat
- davidopp
- gmarek
- jayunit100
- timothysc
- wojtek-t
reviewers:
- bsalamat
- davidopp
- jayunit100
- k82cn
- ravisantoshgudimetla
- sjug
- timothysc
- wojtek-t
48
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/README.md
generated
vendored
Normal file
@ -0,0 +1,48 @@
Scheduler Performance Test
======

Motivation
------
We already have a performance testing system -- Kubemark. However, Kubemark requires setting up and bootstrapping a whole cluster, which takes a lot of time.

We want a standard way to reproduce scheduling latency metrics results and to benchmark the scheduler as simply and quickly as possible. We have the following goals:

- Save time on testing
  - The test and benchmark can be run in a single box.
    We only set up the components necessary for scheduling, without booting up a cluster.
- Profile runtime metrics to find bottlenecks
  - Write scheduler integration tests but focus on performance measurement.
    Take advantage of Go profiling tools and collect fine-grained metrics,
    like cpu-profiling, memory-profiling and block-profiling.
- Reproduce test results easily
  - We want to have a known place to do the performance-related tests for the scheduler.
    Developers should just run one script to collect all the information they need.

Currently the test suite has the following:

- density test (by adding a new Go test)
  - schedule 30k pods on 1000 (fake) nodes and 3k pods on 100 (fake) nodes
  - print out the scheduling rate every second
  - let you see how the rate changes vs. the number of scheduled pods
- benchmark
  - make use of `go test -bench` and report nanoseconds/op.
  - schedule b.N pods when the cluster has N nodes and P scheduled pods. Since it takes a relatively long time to finish one round, b.N is small: 10 - 100.


How To Run
------
```shell
# In Kubernetes root path
make generated_files

cd test/integration/scheduler_perf
./test-performance.sh
```
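By default the script runs only the density tests. Judging from the `RUN_BENCHMARK` guard in `test-performance.sh` (shown later in this diff), invoking it as `RUN_BENCHMARK=true ./test-performance.sh` should additionally compile the benchmark suite and run it with CPU profiling first; treat this as an inference from the script rather than documented behavior.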
27
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
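`framework.EtcdMain` appears to manage the etcd instance that the integration framework's in-process API server needs, wrapping the whole package run so the tests and benchmarks below require no external setup; this reading is inferred from its use here and in the wrapper script's `kube::etcd::start`/`kube::etcd::cleanup` calls, not from the framework's documentation.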
107
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/kubernetes/test/integration/framework"
    testutils "k8s.io/kubernetes/test/utils"

    "github.com/golang/glog"
)

// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 0 scheduled pods
func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
    benchmarkScheduling(100, 0, b)
}

// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 1000 scheduled pods
func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
    benchmarkScheduling(100, 1000, b)
}

// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 0 scheduled pods
func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
    benchmarkScheduling(1000, 0, b)
}

// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 1000 scheduled pods
func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
    benchmarkScheduling(1000, 1000, b)
}

// benchmarkScheduling benchmarks the scheduling rate with a specific number of nodes
// and a specific number of pods already scheduled. Since one operation takes a
// relatively long time, b.N should be small: 10 - 100.
func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
    schedulerConfigFactory, finalFunc := mustSetupScheduler()
    defer finalFunc()
    c := schedulerConfigFactory.GetClient()

    nodePreparer := framework.NewIntegrationTestNodePreparer(
        c,
        []testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
        "scheduler-perf-",
    )
    if err := nodePreparer.PrepareNodes(); err != nil {
        glog.Fatalf("%v", err)
    }
    defer nodePreparer.CleanupNodes()

    config := testutils.NewTestPodCreatorConfig()
    config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
    podCreator := testutils.NewTestPodCreator(c, config)
    podCreator.CreatePods()

    // Wait until the initial pods are scheduled before starting the measurement.
    for {
        scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
        }
        if len(scheduled) >= numScheduledPods {
            break
        }
        time.Sleep(1 * time.Second)
    }
    // start benchmark
    b.ResetTimer()
    config = testutils.NewTestPodCreatorConfig()
    config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2"))
    podCreator = testutils.NewTestPodCreator(c, config)
    podCreator.CreatePods()
    for {
        // This can potentially affect performance of the scheduler, since List() is done under a mutex.
        // TODO: Setup watch on apiserver and wait until all pods scheduled.
        scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
        }
        if len(scheduled) >= numScheduledPods+b.N {
            break
        }
        // Note: This might introduce a slight deviation in the accuracy of benchmark results.
        // Since the total amount of time is relatively large, it might not be a concern.
        time.Sleep(100 * time.Millisecond)
    }
}
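Both polling loops above carry the same TODO. A watch-based wait along the lines the TODO suggests might look like the sketch below; it is illustrative only (the helper name is made up, and a production version would LIST first and start the watch from that resource version so that already-scheduled pods are counted):

```go
// Sketch: count pods as they are bound to nodes, instead of polling the lister.
func waitForScheduledCount(c clientset.Interface, want int) error {
    w, err := c.CoreV1().Pods(metav1.NamespaceAll).Watch(metav1.ListOptions{})
    if err != nil {
        return err
    }
    defer w.Stop()
    scheduled := map[string]bool{}
    for ev := range w.ResultChan() {
        if pod, ok := ev.Object.(*v1.Pod); ok && pod.Spec.NodeName != "" {
            // A set keyed by namespace/name avoids double-counting repeated updates.
            scheduled[pod.Namespace+"/"+pod.Name] = true
        }
        if len(scheduled) >= want {
            return nil
        }
    }
    return fmt.Errorf("watch closed before %d pods were scheduled", want)
}
```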
31
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_perf_types.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

// High Level Configuration for all predicates and priorities.
type schedulerPerfConfig struct {
    NodeCount    int // The number of nodes which will be seeded with metadata to match predicates and have non-trivial priority rankings.
    PodCount     int // The number of pods which will be seeded with metadata to match predicates and have non-trivial priority rankings.
    NodeAffinity *nodeAffinity
    // TODO: Other predicates and priorities to be added here.
}

// nodeAffinity priority configuration details.
type nodeAffinity struct {
    nodeAffinityKey string // Node Selection Key.
    LabelCount      int    // number of labels to be added to each node or pod.
}
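`writePodAndNodeTopologyToConfig` in `scheduler_test.go` below shows these two structs in use: it fills in a `schedulerPerfConfig` literal and then generates the matching node and pod topology from it.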
277
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
    "fmt"
    "math"
    "strconv"
    "testing"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/kubernetes/plugin/pkg/scheduler"
    testutils "k8s.io/kubernetes/test/utils"
)

const (
    warning3K    = 100
    threshold3K  = 30
    threshold30K = 30
    threshold60K = 30
)

var (
    basePodTemplate = &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "sched-perf-pod-",
        },
        // TODO: this needs to be configurable.
        Spec: testutils.MakePodSpec(),
    }
    baseNodeTemplate = &v1.Node{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "sample-node-",
        },
        Spec: v1.NodeSpec{
            // TODO: investigate why this is needed.
            ExternalID: "foo",
        },
        Status: v1.NodeStatus{
            Capacity: v1.ResourceList{
                v1.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
                v1.ResourceCPU:    resource.MustParse("4"),
                v1.ResourceMemory: resource.MustParse("32Gi"),
            },
            Phase: v1.NodeRunning,
            Conditions: []v1.NodeCondition{
                {Type: v1.NodeReady, Status: v1.ConditionTrue},
            },
        },
    }
)

// TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
func TestSchedule100Node3KPods(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping because we want to run short tests")
    }

    config := getBaseConfig(100, 3000)
    err := writePodAndNodeTopologyToConfig(config)
    if err != nil {
        t.Errorf("Failed to configure the nodes/pods chosen to have predicates and priorities: %v", err)
    }
    min := schedulePods(config)
    if min < threshold3K {
        t.Errorf("Failing: Scheduling rate was too low for an interval; we saw a rate of %v, which is below the allowed minimum of %v!", min, threshold3K)
    } else if min < warning3K {
        fmt.Printf("Warning: pod scheduling throughput for 3k pods was slow for an interval... Saw an interval with a very low (%v) scheduling rate!", min)
    } else {
        fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
    }
}

// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
// This test won't fit in the normal 10 minute time window.
// func TestSchedule2000Node60KPods(t *testing.T) {
//     if testing.Short() {
//         t.Skip("Skipping because we want to run short tests")
//     }
//     config := defaultSchedulerBenchmarkConfig(2000, 60000)
//     if min := schedulePods(config); min < threshold60K {
//         t.Errorf("Too small pod scheduling throughput for 60k pods. Expected %v got %v", threshold60K, min)
//     } else {
//         fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
//     }
// }

// testConfig contains some input parameters needed for running the test suite.
type testConfig struct {
    numPods                   int
    numNodes                  int
    mutatedNodeTemplate       *v1.Node
    mutatedPodTemplate        *v1.Pod
    schedulerSupportFunctions scheduler.Configurator
    destroyFunc               func()
}

// getBaseConfig returns baseConfig after initializing number of nodes and pods.
func getBaseConfig(nodes int, pods int) *testConfig {
    schedulerConfigFactory, destroyFunc := mustSetupScheduler()
    return &testConfig{
        schedulerSupportFunctions: schedulerConfigFactory,
        destroyFunc:               destroyFunc,
        numNodes:                  nodes,
        numPods:                   pods,
    }
}

// schedulePods schedules a specific number of pods on a specific number of nodes.
// This is used to learn the scheduling throughput on various
// cluster sizes, and how it changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
// It returns the minimum throughput observed over the whole run.
func schedulePods(config *testConfig) int32 {
    defer config.destroyFunc()
    prev := 0
    // On startup there may be a latent period where NO scheduling occurs (qps = 0).
    // We are interested in low scheduling rates (i.e. qps=2), so start the
    // running minimum as high as possible.
    minQps := int32(math.MaxInt32)
    start := time.Now()
    // Bake in time for the first pod scheduling event.
    for {
        time.Sleep(50 * time.Millisecond)
        scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
        }
        // 30,000 pods -> wait till at least 300 are scheduled to start measuring.
        // TODO: Find out why sometimes there may be scheduling blips in the beginning.
        if len(scheduled) > config.numPods/100 {
            break
        }
    }
    // Map of minimum QPS entries in a counter, useful for debugging tests.
    qpsStats := map[int]int{}

    // Now that scheduling has started, let's take the pulse on how many pods are being scheduled per second.
    for {
        // This can potentially affect performance of the scheduler, since List() is done under a mutex.
        // Listing 10000 pods is an expensive operation, so running it frequently may impact the scheduler.
        // TODO: Setup watch on apiserver and wait until all pods scheduled.
        scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
        }

        // We are done when all pods have been scheduled.
        // Return the worst-case-scenario interval that was seen during this time.
        // Note this should never be low due to cold start, since we bake in scheduling time above.
        if len(scheduled) >= config.numPods {
            fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
                config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
            return minQps
        }

        // There's no point in printing the rate for the last iteration, as the value is random.
        qps := len(scheduled) - prev
        qpsStats[qps]++
        if int32(qps) < minQps {
            minQps = int32(qps)
        }
        fmt.Printf("%ds\trate: %d\ttotal: %d (qps frequency: %v)\n", time.Since(start)/time.Second, qps, len(scheduled), qpsStats)
        prev = len(scheduled)
        time.Sleep(1 * time.Second)
    }
}

// mutateNodeTemplate applies the configured node-affinity labels to the node template in place.
func (na nodeAffinity) mutateNodeTemplate(node *v1.Node) {
    nodeLabels := make(map[string]string)
    for i := 0; i < na.LabelCount; i++ {
        value := strconv.Itoa(i)
        key := na.nodeAffinityKey + value
        nodeLabels[key] = value
    }
    node.ObjectMeta.Labels = nodeLabels
}

// mutatePodTemplate applies the configured node-affinity requirements to the pod template in place.
func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) {
    var nodeSelectorRequirements []v1.NodeSelectorRequirement
    for i := 0; i < na.LabelCount; i++ {
        value := strconv.Itoa(i)
        key := na.nodeAffinityKey + value
        nodeSelector := v1.NodeSelectorRequirement{Key: key, Values: []string{value}, Operator: v1.NodeSelectorOpIn}
        nodeSelectorRequirements = append(nodeSelectorRequirements, nodeSelector)
    }
    pod.Spec.Affinity = &v1.Affinity{
        NodeAffinity: &v1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                NodeSelectorTerms: []v1.NodeSelectorTerm{
                    {
                        MatchExpressions: nodeSelectorRequirements,
                    },
                },
            },
        },
    }
}

// generateNodes generates nodes to be used for scheduling.
func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) {
    for i := 0; i < inputConfig.NodeCount; i++ {
        config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(config.mutatedNodeTemplate)
    }
    for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ {
        config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(baseNodeTemplate)
    }
}

// generatePods generates pods to be used for scheduling.
func (inputConfig *schedulerPerfConfig) generatePods(config *testConfig) {
    testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", inputConfig.PodCount, config.mutatedPodTemplate)
    testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", config.numPods-inputConfig.PodCount, basePodTemplate)
}

// generatePodAndNodeTopology is the wrapper function for modifying both pod and node objects.
func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testConfig) error {
    if config.numNodes < inputConfig.NodeCount || config.numPods < inputConfig.PodCount {
        return fmt.Errorf("NodeCount cannot be greater than numNodes, and PodCount cannot be greater than numPods")
    }
    nodeAffinity := inputConfig.NodeAffinity
    // Deep-copy the base templates so the mutations below do not leak into the
    // shared baseNodeTemplate/basePodTemplate also used by generateNodes/generatePods.
    mutatedNodeTemplate := baseNodeTemplate.DeepCopy()
    mutatedPodTemplate := basePodTemplate.DeepCopy()
    if nodeAffinity != nil {
        nodeAffinity.mutateNodeTemplate(mutatedNodeTemplate)
        nodeAffinity.mutatePodTemplate(mutatedPodTemplate)
    } // TODO: other predicates/priorities will be processed in subsequent if statements or a switch:).
    config.mutatedPodTemplate = mutatedPodTemplate
    config.mutatedNodeTemplate = mutatedNodeTemplate
    inputConfig.generateNodes(config)
    inputConfig.generatePods(config)
    return nil
}

// writePodAndNodeTopologyToConfig reads a configuration and then applies it to a test configuration.
// TODO: As of now, this function is not doing anything except reading input values into the priority structs.
func writePodAndNodeTopologyToConfig(config *testConfig) error {
    // High-level structure that should be filled in for every predicate or priority.
    inputConfig := &schedulerPerfConfig{
        NodeCount: 100,
        PodCount:  3000,
        NodeAffinity: &nodeAffinity{
            nodeAffinityKey: "kubernetes.io/sched-perf-node-affinity-",
            LabelCount:      10,
        },
    }
    return inputConfig.generatePodAndNodeTopology(config)
}
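The `mustSetupScheduler` helper shared by both test files above is defined in `util.go`, which is listed in the BUILD `srcs` earlier in this diff but is itself not shown. From its call sites it returns the scheduler's `scheduler.Configurator` together with a teardown function, presumably after bringing up an in-process API server and scheduler; the exact setup is elided here.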
52
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/test-performance.sh
generated
vendored
Executable file
@ -0,0 +1,52 @@
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
source "${KUBE_ROOT}/hack/lib/init.sh"

kube::golang::setup_env

DIR_BASENAME=$(dirname "${BASH_SOURCE}")
pushd "${DIR_BASENAME}"

cleanup() {
  popd 2> /dev/null
  kube::etcd::cleanup
  kube::log::status "performance test cleanup complete"
}

trap cleanup EXIT

kube::etcd::start

# We use the benchmark suite for profiling, because it only runs a few pods
# and theoretically has less variance.
if ${RUN_BENCHMARK:-false}; then
  kube::log::status "performance test (benchmark) compiling"
  go test -c -o "perf.test"

  kube::log::status "performance test (benchmark) start"
  # -test.run=xxxx matches no test names, so only the benchmarks execute.
  "./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out -test.short=false
  kube::log::status "...benchmark tests finished"
fi
# Run the density tests. They might take a long time.
kube::log::status "performance test (density) start"
go test -test.run=. -test.timeout=60m -test.short=false
kube::log::status "...density tests finished"
Some files were not shown because too many files have changed in this diff.