vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/integration/BUILD (generated, vendored, 2 lines changed)
@@ -48,6 +48,7 @@ filegroup(
        "//test/integration/examples:all-srcs",
        "//test/integration/framework:all-srcs",
        "//test/integration/garbagecollector:all-srcs",
        "//test/integration/ipamperf:all-srcs",
        "//test/integration/master:all-srcs",
        "//test/integration/metrics:all-srcs",
        "//test/integration/objectmeta:all-srcs",
@@ -61,6 +62,7 @@ filegroup(
        "//test/integration/scheduler_perf:all-srcs",
        "//test/integration/secrets:all-srcs",
        "//test/integration/serviceaccount:all-srcs",
        "//test/integration/statefulset:all-srcs",
        "//test/integration/storageclasses:all-srcs",
        "//test/integration/tls:all-srcs",
        "//test/integration/ttlcontroller:all-srcs",
vendor/k8s.io/kubernetes/test/integration/apiserver/BUILD (generated, vendored, 19 lines changed)
@@ -12,30 +12,47 @@ go_test(
        "apiserver_test.go",
        "main_test.go",
        "patch_test.go",
        "print_test.go",
    ],
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubectl/genericclioptions:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/printers:go_default_library",
        "//pkg/printers/internalversion:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
        "//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/handlers:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/features:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/pager:go_default_library",
        "//vendor/k8s.io/gengo/examples/set-gen/sets:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/test/integration/apiserver/apiserver_test.go (generated, vendored, 93 lines changed)
@@ -32,6 +32,7 @@ import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    genericfeatures "k8s.io/apiserver/pkg/features"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
@@ -39,12 +40,17 @@ import (
    "k8s.io/client-go/tools/pager"
    "k8s.io/kubernetes/pkg/api/testapi"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/master"
    "k8s.io/kubernetes/test/integration/framework"
)

-func setup(t *testing.T) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
+func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.ExtraConfig.EnableCoreControllers = false
    if len(groupVersions) > 0 {
        resourceConfig := master.DefaultAPIResourceConfigSource()
        resourceConfig.EnableVersions(groupVersions...)
        masterConfig.ExtraConfig.APIResourceConfigSource = resourceConfig
    }
    _, s, closeFn := framework.RunAMaster(masterConfig)

    clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
@@ -232,3 +238,86 @@ func TestAPIListChunking(t *testing.T) {
        t.Errorf("unexpected items: %#v", list)
    }
}

func makeSecret(name string) *v1.Secret {
    return &v1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Data: map[string][]byte{
            "key": []byte("value"),
        },
    }
}

func TestNameInFieldSelector(t *testing.T) {
    s, clientSet, closeFn := setup(t)
    defer closeFn()

    numNamespaces := 3
    namespaces := make([]*v1.Namespace, 0, numNamespaces)
    for i := 0; i < 3; i++ {
        ns := framework.CreateTestingNamespace(fmt.Sprintf("ns%d", i), s, t)
        defer framework.DeleteTestingNamespace(ns, s, t)
        namespaces = append(namespaces, ns)

        _, err := clientSet.CoreV1().Secrets(ns.Name).Create(makeSecret("foo"))
        if err != nil {
            t.Errorf("Couldn't create secret: %v", err)
        }
        _, err = clientSet.CoreV1().Secrets(ns.Name).Create(makeSecret("bar"))
        if err != nil {
            t.Errorf("Couldn't create secret: %v", err)
        }
    }

    testcases := []struct {
        namespace       string
        selector        string
        expectedSecrets int
    }{
        {
            namespace:       "",
            selector:        "metadata.name=foo",
            expectedSecrets: numNamespaces,
        },
        {
            namespace:       "",
            selector:        "metadata.name=foo,metadata.name=bar",
            expectedSecrets: 0,
        },
        {
            namespace:       "",
            selector:        "metadata.name=foo,metadata.namespace=ns1",
            expectedSecrets: 1,
        },
        {
            namespace:       "ns1",
            selector:        "metadata.name=foo,metadata.namespace=ns1",
            expectedSecrets: 1,
        },
        {
            namespace:       "ns1",
            selector:        "metadata.name=foo,metadata.namespace=ns2",
            expectedSecrets: 0,
        },
        {
            namespace:       "ns1",
            selector:        "metadata.name=foo,metadata.namespace=",
            expectedSecrets: 0,
        },
    }

    for _, tc := range testcases {
        opts := metav1.ListOptions{
            FieldSelector: tc.selector,
        }
        secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(opts)
        if err != nil {
            t.Errorf("%s: Unexpected error: %v", tc.selector, err)
        }
        if len(secrets.Items) != tc.expectedSecrets {
            t.Errorf("%s: Unexpected number of secrets: %d, expected: %d", tc.selector, len(secrets.Items), tc.expectedSecrets)
        }
    }
}
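The new TestNameInFieldSelector above pins down how metadata.name and metadata.namespace field selectors combine across namespaces. For context, the same behavior can be exercised against any cluster with a short client-go program. This is a minimal sketch, not part of the commit, assuming a kubeconfig at the default path and the pre-context client-go vintage vendored here:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Load whatever kubeconfig the environment provides (assumption: the
    // default ~/.kube/config location).
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    cs := kubernetes.NewForConfigOrDie(config)

    // Listing with namespace "" queries across all namespaces; the field
    // selector then narrows the result server-side, exactly as the test
    // expects: one "foo" secret per namespace.
    secrets, err := cs.CoreV1().Secrets("").List(metav1.ListOptions{
        FieldSelector: "metadata.name=foo",
    })
    if err != nil {
        panic(err)
    }
    for _, s := range secrets.Items {
        fmt.Printf("%s/%s\n", s.Namespace, s.Name)
    }
}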
vendor/k8s.io/kubernetes/test/integration/apiserver/patch_test.go (generated, vendored, 68 lines changed)
@@ -24,18 +24,15 @@ import (

    "github.com/pborman/uuid"

-   "reflect"
-
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
-   "k8s.io/apiserver/pkg/endpoints/handlers"
    "k8s.io/kubernetes/test/integration/framework"
)

-// Tests that the apiserver retries non-overlapping conflicts on patches
+// Tests that the apiserver retries patches
func TestPatchConflicts(t *testing.T) {
    s, clientSet, closeFn := setup(t)
    defer closeFn()
@@ -43,28 +40,41 @@ func TestPatchConflicts(t *testing.T) {
    ns := framework.CreateTestingNamespace("status-code", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   // Create the object we're going to conflict on
-   clientSet.CoreV1().Secrets(ns.Name).Create(&v1.Secret{
-       ObjectMeta: metav1.ObjectMeta{
-           Name: "test",
-           // Populate annotations so the strategic patch descends, compares, and notices the $patch directive
-           Annotations: map[string]string{"initial": "value"},
-       },
-   })
+   numOfConcurrentPatches := 100
+
+   UIDs := make([]types.UID, numOfConcurrentPatches)
+   ownerRefs := []metav1.OwnerReference{}
+   for i := 0; i < numOfConcurrentPatches; i++ {
+       uid := types.UID(uuid.NewRandom().String())
+       ownerName := fmt.Sprintf("owner-%d", i)
+       UIDs[i] = uid
+       ownerRefs = append(ownerRefs, metav1.OwnerReference{
+           APIVersion: "example.com/v1",
+           Kind:       "Foo",
+           Name:       ownerName,
+           UID:        uid,
+       })
+   }
+   secret := &v1.Secret{
+       ObjectMeta: metav1.ObjectMeta{
+           Name:            "test",
+           OwnerReferences: ownerRefs,
+       },
+   }
+
+   // Create the object we're going to conflict on
+   clientSet.CoreV1().Secrets(ns.Name).Create(secret)
    client := clientSet.CoreV1().RESTClient()

    successes := int32(0)

-   // Run a lot of simultaneous patch operations to exercise internal API server retry of patch application.
-   // Internally, a patch API call retries up to MaxRetryWhenPatchConflicts times if the resource version of the object has changed.
-   // If the resource version of the object changed between attempts, that means another one of our patch requests succeeded.
-   // That means if we run 2*MaxRetryWhenPatchConflicts patch attempts, we should see at least MaxRetryWhenPatchConflicts succeed.
+   // Run a lot of simultaneous patch operations to exercise internal API server retry of application of patches that do not specify resourceVersion.
+   // They should all succeed.
    wg := sync.WaitGroup{}
-   for i := 0; i < (2 * handlers.MaxRetryWhenPatchConflicts); i++ {
+   for i := 0; i < numOfConcurrentPatches; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            annotationName := fmt.Sprintf("annotation-%d", i)
            labelName := fmt.Sprintf("label-%d", i)
            value := uuid.NewRandom().String()

@@ -72,7 +82,7 @@ func TestPatchConflicts(t *testing.T) {
                Namespace(ns.Name).
                Resource("secrets").
                Name("test").
-               Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "annotations":{"$patch":"replace","%s":"%s"}}}`, labelName, value, annotationName, value))).
+               Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "ownerReferences":[{"$patch":"delete","uid":"%s"}]}}`, labelName, value, UIDs[i]))).
                Do().
                Get()

@@ -95,9 +105,14 @@ func TestPatchConflicts(t *testing.T) {
                t.Errorf("patch of %s was ineffective, expected %s=%s, got labels %#v", "secrets", labelName, value, accessor.GetLabels())
                return
            }
-           // make sure the patch directive didn't get lost, and that the entire annotation map was replaced
-           if !reflect.DeepEqual(accessor.GetAnnotations(), map[string]string{annotationName: value}) {
-               t.Errorf("patch of %s with $patch directive was ineffective, didn't replace entire annotations map: %#v", "secrets", accessor.GetAnnotations())
+           // make sure the patch directive didn't get lost, and that an entry in the ownerReference list was deleted.
+           found := findOwnerRefByUID(accessor.GetOwnerReferences(), UIDs[i])
+           if err != nil {
+               t.Errorf("%v", err)
+               return
+           }
+           if found {
+               t.Errorf("patch of %s with $patch directive was ineffective, didn't delete the entry in the ownerReference slice: %#v", "secrets", UIDs[i])
            }

            atomic.AddInt32(&successes, 1)
@@ -105,10 +120,19 @@ func TestPatchConflicts(t *testing.T) {
    }
    wg.Wait()

-   if successes < handlers.MaxRetryWhenPatchConflicts {
-       t.Errorf("Expected at least %d successful patches for %s, got %d", handlers.MaxRetryWhenPatchConflicts, "secrets", successes)
+   if successes < int32(numOfConcurrentPatches) {
+       t.Errorf("Expected at least %d successful patches for %s, got %d", numOfConcurrentPatches, "secrets", successes)
    } else {
        t.Logf("Got %d successful patches for %s", successes, "secrets")
    }

}

func findOwnerRefByUID(ownerRefs []metav1.OwnerReference, uid types.UID) bool {
    for _, of := range ownerRefs {
        if of.UID == uid {
            return true
        }
    }
    return false
}
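The rewritten test leans on the strategic-merge-patch "$patch": "delete" directive: ownerReferences is a merge list keyed by uid, so a patch entry carrying the directive removes just the matching element. The directive can be tried offline with apimachinery's strategicpatch package. A standalone sketch follows; the example.com kinds and uid values are invented, and the helper's exact behavior should be checked against the vendored version:

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
    // A secret with two owner references (hypothetical uids).
    original := []byte(`{"metadata":{"name":"test","ownerReferences":[
        {"apiVersion":"example.com/v1","kind":"Foo","name":"owner-0","uid":"uid-0"},
        {"apiVersion":"example.com/v1","kind":"Foo","name":"owner-1","uid":"uid-1"}]}}`)

    // $patch:delete removes the list entry whose merge key (uid, for
    // ownerReferences) matches, leaving the rest of the list untouched.
    patch := []byte(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"uid-0"}]}}`)

    merged, err := strategicpatch.StrategicMergePatch(original, patch, v1.Secret{})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(merged)) // only uid-1 should remain
}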
vendor/k8s.io/kubernetes/test/integration/apiserver/print_test.go (generated, vendored, new file, 304 lines)
@@ -0,0 +1,304 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "reflect"
    "strings"
    "testing"
    "time"

    batchv2alpha1 "k8s.io/api/batch/v2alpha1"
    rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    schedulerapi "k8s.io/api/scheduling/v1beta1"
    settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
    storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/gengo/examples/set-gen/sets"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/kubectl/cmd/util"
    "k8s.io/kubernetes/pkg/kubectl/genericclioptions"
    "k8s.io/kubernetes/pkg/printers"
    printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
    "k8s.io/kubernetes/test/integration/framework"
)

var kindWhiteList = sets.NewString(
    // k8s.io/api/core
    "APIGroup",
    "APIVersions",
    "Binding",
    "DeleteOptions",
    "ExportOptions",
    "GetOptions",
    "ListOptions",
    "NodeProxyOptions",
    "PodAttachOptions",
    "PodExecOptions",
    "PodPortForwardOptions",
    "PodLogOptions",
    "PodProxyOptions",
    "PodStatusResult",
    "RangeAllocation",
    "ServiceProxyOptions",
    "SerializedReference",
    // --

    // k8s.io/api/admission
    "AdmissionReview",
    // --

    // k8s.io/api/admissionregistration
    "InitializerConfiguration",
    // --

    // k8s.io/api/authentication
    "TokenRequest",
    "TokenReview",
    // --

    // k8s.io/api/authorization
    "LocalSubjectAccessReview",
    "SelfSubjectAccessReview",
    "SelfSubjectRulesReview",
    "SubjectAccessReview",
    // --

    // k8s.io/api/autoscaling
    "Scale",
    // --

    // k8s.io/api/apps
    "DeploymentRollback",
    // --

    // k8s.io/api/batch
    "JobTemplate",
    // --

    // k8s.io/api/extensions
    "ReplicationControllerDummy",
    // --

    // k8s.io/api/imagepolicy
    "ImageReview",
    // --

    // k8s.io/api/policy
    "Eviction",
    // --

    // k8s.io/kubernetes/pkg/apis/componentconfig
    "KubeSchedulerConfiguration",
    // --

    // k8s.io/apimachinery/pkg/apis/meta
    "WatchEvent",
    "Status",
    // --
)

// TODO (soltysh): this list has to go down to 0!
var missingHanlders = sets.NewString(
    "ClusterRole",
    "LimitRange",
    "MutatingWebhookConfiguration",
    "ResourceQuota",
    "Role",
    "ValidatingWebhookConfiguration",
    "VolumeAttachment",
    "PriorityClass",
    "PodPreset",
)

func TestServerSidePrint(t *testing.T) {
    s, _, closeFn := setup(t,
        // additional groupversions needed for the test to run
        batchv2alpha1.SchemeGroupVersion,
        rbacv1alpha1.SchemeGroupVersion,
        settingsv1alpha1.SchemeGroupVersion,
        schedulerapi.SchemeGroupVersion,
        storagev1alpha1.SchemeGroupVersion)
    defer closeFn()

    ns := framework.CreateTestingNamespace("server-print", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    tableParam := fmt.Sprintf("application/json;as=Table;g=%s;v=%s, application/json", metav1beta1.GroupName, metav1beta1.SchemeGroupVersion.Version)
    printer := newFakePrinter(printersinternal.AddHandlers)

    configFlags := genericclioptions.NewTestConfigFlags().
        WithClientConfig(clientcmd.NewDefaultClientConfig(*createKubeConfig(s.URL), &clientcmd.ConfigOverrides{}))

    restConfig, err := configFlags.ToRESTConfig()
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    cacheDir, err := ioutil.TempDir(os.TempDir(), "test-integration-apiserver-print")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    defer func() {
        os.Remove(cacheDir)
    }()

    cachedClient, err := discovery.NewCachedDiscoveryClientForConfig(restConfig, cacheDir, "", time.Duration(10*time.Minute))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    configFlags.WithDiscoveryClient(cachedClient)

    factory := util.NewFactory(configFlags)
    mapper, err := factory.ToRESTMapper()
    if err != nil {
        t.Errorf("unexpected error getting mapper: %v", err)
        return
    }
    for gvk, apiType := range legacyscheme.Scheme.AllKnownTypes() {
        // we do not care about internal objects or lists // TODO make sure this is always true
        if gvk.Version == runtime.APIVersionInternal || strings.HasSuffix(apiType.Name(), "List") {
            continue
        }
        if kindWhiteList.Has(gvk.Kind) || missingHanlders.Has(gvk.Kind) {
            continue
        }

        t.Logf("Checking %s", gvk)
        // read table definition as returned by the server
        mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
        if err != nil {
            t.Errorf("unexpected error getting mapping for GVK %s: %v", gvk, err)
            continue
        }
        client, err := factory.ClientForMapping(mapping)
        if err != nil {
            t.Errorf("unexpected error getting client for GVK %s: %v", gvk, err)
            continue
        }
        req := client.Get()
        if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
            req = req.Namespace(ns.Name)
        }
        body, err := req.Resource(mapping.Resource.Resource).SetHeader("Accept", tableParam).Do().Raw()
        if err != nil {
            t.Errorf("unexpected error getting %s: %v", gvk, err)
            continue
        }
        actual, err := decodeIntoTable(body)
        if err != nil {
            t.Errorf("unexpected error decoding %s: %v", gvk, err)
            continue
        }

        // get table definition used in printers
        obj, err := legacyscheme.Scheme.New(gvk)
        if err != nil {
            t.Errorf("unexpected error creating %s: %v", gvk, err)
            continue
        }
        intGV := gvk.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion()
        intObj, err := legacyscheme.Scheme.ConvertToVersion(obj, intGV)
        if err != nil {
            t.Errorf("unexpected error converting %s to internal: %v", gvk, err)
            continue
        }
        expectedColumnDefinitions, ok := printer.handlers[reflect.TypeOf(intObj)]
        if !ok {
            t.Errorf("missing handler for type %v", gvk)
            continue
        }

        for _, e := range expectedColumnDefinitions {
            for _, a := range actual.ColumnDefinitions {
                if a.Name == e.Name && !reflect.DeepEqual(a, e) {
                    t.Errorf("unexpected difference in column definition %s for %s:\nexpected:\n%#v\nactual:\n%#v\n", e.Name, gvk, e, a)
                }
            }
        }
    }
}

type fakePrinter struct {
    handlers map[reflect.Type][]metav1beta1.TableColumnDefinition
}

var _ printers.PrintHandler = &fakePrinter{}

func (f *fakePrinter) Handler(columns, columnsWithWide []string, printFunc interface{}) error {
    return nil
}

func (f *fakePrinter) TableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error {
    printFuncValue := reflect.ValueOf(printFunc)
    objType := printFuncValue.Type().In(0)
    f.handlers[objType] = columns
    return nil
}

func (f *fakePrinter) DefaultTableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error {
    return nil
}

func newFakePrinter(fns ...func(printers.PrintHandler)) *fakePrinter {
    handlers := make(map[reflect.Type][]metav1beta1.TableColumnDefinition, len(fns))
    p := &fakePrinter{handlers: handlers}
    for _, fn := range fns {
        fn(p)
    }
    return p
}

func decodeIntoTable(body []byte) (*metav1beta1.Table, error) {
    table := &metav1beta1.Table{}
    err := json.Unmarshal(body, table)
    if err != nil {
        return nil, err
    }
    return table, nil
}

func createKubeConfig(url string) *clientcmdapi.Config {
    clusterNick := "cluster"
    userNick := "user"
    contextNick := "context"

    config := clientcmdapi.NewConfig()

    cluster := clientcmdapi.NewCluster()
    cluster.Server = url
    cluster.InsecureSkipTLSVerify = true
    config.Clusters[clusterNick] = cluster

    context := clientcmdapi.NewContext()
    context.Cluster = clusterNick
    context.AuthInfo = userNick
    config.Contexts[contextNick] = context
    config.CurrentContext = contextNick

    return config
}
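TestServerSidePrint drives the server-side printing path: instead of printing client-side, the client asks the API server to render a Table through the Accept header. A hedged sketch of the same request against a live server, assuming a default kubeconfig and the client-go vintage vendored here:

package main

import (
    "encoding/json"
    "fmt"

    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    cs := kubernetes.NewForConfigOrDie(config)

    // Ask the server to render the list as a meta.k8s.io/v1beta1 Table,
    // falling back to plain JSON if it cannot.
    raw, err := cs.CoreV1().RESTClient().Get().
        Resource("pods").
        Namespace("default").
        SetHeader("Accept", "application/json;as=Table;g=meta.k8s.io;v=v1beta1, application/json").
        Do().Raw()
    if err != nil {
        panic(err)
    }

    var table metav1beta1.Table
    if err := json.Unmarshal(raw, &table); err != nil {
        panic(err)
    }
    for _, col := range table.ColumnDefinitions {
        fmt.Println(col.Name, col.Type)
    }
}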
vendor/k8s.io/kubernetes/test/integration/auth/BUILD (generated, vendored, 2 lines changed)
@@ -44,7 +44,6 @@ go_test(
        "//pkg/registry/rbac/rolebinding:go_default_library",
        "//pkg/registry/rbac/rolebinding/storage:go_default_library",
        "//pkg/serviceaccount:go_default_library",
-       "//plugin/pkg/admission/admit:go_default_library",
        "//plugin/pkg/admission/noderestriction:go_default_library",
        "//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
        "//plugin/pkg/auth/authorizer/rbac:go_default_library",
@@ -52,6 +51,7 @@ go_test(
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library",
        "//vendor/k8s.io/api/authentication/v1:go_default_library",
        "//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go (generated, vendored, 4 lines changed)
@@ -31,7 +31,6 @@ import (
    authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
    api "k8s.io/kubernetes/pkg/apis/core"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-   "k8s.io/kubernetes/plugin/pkg/admission/admit"
    "k8s.io/kubernetes/test/integration/framework"
)

@@ -57,7 +56,6 @@ func TestSubjectAccessReview(t *testing.T) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(alwaysAlice)
    masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
-   masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()

@@ -151,7 +149,6 @@ func TestSelfSubjectAccessReview(t *testing.T) {
        return &user.DefaultInfo{Name: username}, true, nil
    })
    masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
-   masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()

@@ -231,7 +228,6 @@ func TestLocalSubjectAccessReview(t *testing.T) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(alwaysAlice)
    masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
-   masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go (generated, vendored, 2 lines changed)
@@ -51,7 +51,6 @@ import (
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/extensions"
    "k8s.io/kubernetes/pkg/auth/authorizer/abac"
-   "k8s.io/kubernetes/plugin/pkg/admission/admit"
    "k8s.io/kubernetes/test/integration"
    "k8s.io/kubernetes/test/integration/framework"
)
@@ -555,7 +554,6 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
    masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{}
-   masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
vendor/k8s.io/kubernetes/test/integration/auth/bootstraptoken_test.go (generated, vendored, 2 lines changed)
@@ -29,7 +29,6 @@ import (
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
    api "k8s.io/kubernetes/pkg/apis/core"
-   "k8s.io/kubernetes/plugin/pkg/admission/admit"
    "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
    bootstraputil "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
    "k8s.io/kubernetes/test/integration"
@@ -126,7 +125,6 @@ func TestBootstrapTokenAuth(t *testing.T) {
    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authentication.Authenticator = authenticator
-   masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
vendor/k8s.io/kubernetes/test/integration/auth/node_test.go (generated, vendored, 62 lines changed)
@@ -81,6 +81,9 @@ func TestNodeAuthorizer(t *testing.T) {
    // Enabled CSIPersistentVolume feature at startup so volumeattachments get watched
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)()

    // Enable DynamicKubeletConfig feature so that Node.Spec.ConfigSource can be set
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicKubeletConfig, true)()

    // Set up Node+RBAC authorizer
    authorizerConfig := &authorizer.AuthorizationConfig{
        AuthorizationModes: []string{"Node", "RBAC"},
@@ -94,7 +97,7 @@ func TestNodeAuthorizer(t *testing.T) {

    // Set up NodeRestriction admission
    nodeRestrictionAdmission := noderestriction.NewPlugin(nodeidentifier.NewDefaultNodeIdentifier())
    nodeRestrictionAdmission.SetInternalKubeClientSet(superuserClient)
    nodeRestrictionAdmission.SetInternalKubeInformerFactory(informerFactory)
    if err := nodeRestrictionAdmission.ValidateInitialization(); err != nil {
        t.Fatal(err)
    }
@@ -135,6 +138,9 @@ func TestNodeAuthorizer(t *testing.T) {
    if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil {
        t.Fatal(err)
    }
    if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmapconfigsource"}}); err != nil {
        t.Fatal(err)
    }
    pvName := "mypv"
    if _, err := superuserClientExternal.StorageV1beta1().VolumeAttachments().Create(&storagev1beta1.VolumeAttachment{
        ObjectMeta: metav1.ObjectMeta{Name: "myattachment"},
@@ -186,6 +192,12 @@ func TestNodeAuthorizer(t *testing.T) {
            return err
        }
    }
    getConfigMapConfigSource := func(client clientset.Interface) func() error {
        return func() error {
            _, err := client.Core().ConfigMaps("ns").Get("myconfigmapconfigsource", metav1.GetOptions{})
            return err
        }
    }
    getPVC := func(client clientset.Interface) func() error {
        return func() error {
            _, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
@@ -267,6 +279,34 @@ func TestNodeAuthorizer(t *testing.T) {
            return err
        }
    }
    setNode2ConfigSource := func(client clientset.Interface) func() error {
        return func() error {
            node2, err := client.Core().Nodes().Get("node2", metav1.GetOptions{})
            if err != nil {
                return err
            }
            node2.Spec.ConfigSource = &api.NodeConfigSource{
                ConfigMap: &api.ConfigMapNodeConfigSource{
                    Namespace:        "ns",
                    Name:             "myconfigmapconfigsource",
                    KubeletConfigKey: "kubelet",
                },
            }
            _, err = client.Core().Nodes().Update(node2)
            return err
        }
    }
    unsetNode2ConfigSource := func(client clientset.Interface) func() error {
        return func() error {
            node2, err := client.Core().Nodes().Get("node2", metav1.GetOptions{})
            if err != nil {
                return err
            }
            node2.Spec.ConfigSource = nil
            _, err = client.Core().Nodes().Update(node2)
            return err
        }
    }
    updateNode2Status := func(client clientset.Interface) func() error {
        return func() error {
            _, err := client.Core().Nodes().UpdateStatus(&api.Node{
@@ -420,6 +460,7 @@ func TestNodeAuthorizer(t *testing.T) {
    expectAllowed(t, deleteNode2NormalPod(node2Client))
    expectAllowed(t, createNode2MirrorPod(node2Client))
    expectAllowed(t, deleteNode2MirrorPod(node2Client))

    // recreate as an admin to test eviction
    expectAllowed(t, createNode2NormalPod(superuserClient))
    expectAllowed(t, createNode2MirrorPod(superuserClient))
@@ -449,6 +490,25 @@ func TestNodeAuthorizer(t *testing.T) {
    expectForbidden(t, getVolumeAttachment(node1ClientExternal))
    expectAllowed(t, getVolumeAttachment(node2ClientExternal))

    // create node2 again
    expectAllowed(t, createNode2(node2Client))
    // node2 can not set its own config source
    expectForbidden(t, setNode2ConfigSource(node2Client))
    // node2 can not access the configmap config source yet
    expectForbidden(t, getConfigMapConfigSource(node2Client))
    // superuser can access the configmap config source
    expectAllowed(t, getConfigMapConfigSource(superuserClient))
    // superuser can set node2's config source
    expectAllowed(t, setNode2ConfigSource(superuserClient))
    // node2 can now get the configmap assigned as its config source
    expectAllowed(t, getConfigMapConfigSource(node2Client))
    // superuser can unset node2's config source
    expectAllowed(t, unsetNode2ConfigSource(superuserClient))
    // node2 can no longer get the configmap after it is unassigned as its config source
    expectForbidden(t, getConfigMapConfigSource(node2Client))
    // clean up node2
    expectAllowed(t, deleteNode2(node2Client))

    //TODO(mikedanese): integration test node restriction of TokenRequest
}
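The double call in `defer utilfeaturetesting.SetFeatureGateDuringTest(...)()` above is easy to misread: the helper flips the gate immediately and returns a restore function, and only that restore function is deferred. A minimal illustration of the pattern, using the same vendored packages (the test name is invented):

package example

import (
    "testing"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    "k8s.io/kubernetes/pkg/features"
)

func TestWithDynamicKubeletConfig(t *testing.T) {
    // SetFeatureGateDuringTest enables the gate right now and returns a
    // restore function; the trailing () defers the restore, not the flip.
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicKubeletConfig, true)()

    // ... test body runs with DynamicKubeletConfig enabled ...
}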
vendor/k8s.io/kubernetes/test/integration/auth/svcaccttoken_test.go (generated, vendored, 120 lines changed)
@@ -24,17 +24,20 @@ import (
    "testing"
    "time"

    "gopkg.in/square/go-jose.v2/jwt"
    authenticationv1 "k8s.io/api/authentication/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
    "k8s.io/apiserver/pkg/authorization/authorizerfactory"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    clientset "k8s.io/client-go/kubernetes"
    externalclientset "k8s.io/client-go/kubernetes"
    certutil "k8s.io/client-go/util/cert"
    "k8s.io/kubernetes/pkg/apis/core"
    serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/serviceaccount"
@@ -118,7 +121,6 @@ func TestServiceAccountTokenCreate(t *testing.T) {
        },
    }

-       one      = int64(1)
        wrongUID = types.UID("wrong")
        noUID    = types.UID("")
    )
@@ -126,8 +128,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
    t.Run("bound to service account", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
-               Audiences:         []string{"api"},
-               ExpirationSeconds: &one,
+               Audiences: []string{"api"},
            },
        }

@@ -157,8 +158,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
    t.Run("bound to service account and pod", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
-               Audiences:         []string{"api"},
-               ExpirationSeconds: &one,
+               Audiences: []string{"api"},
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Pod",
                    APIVersion: "v1",
@@ -211,8 +211,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
    t.Run("bound to service account and secret", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
-               Audiences:         []string{"api"},
-               ExpirationSeconds: &one,
+               Audiences: []string{"api"},
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Secret",
                    APIVersion: "v1",
@@ -266,8 +265,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
    t.Run("bound to service account and pod running as different service account", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
-               Audiences:         []string{"api"},
-               ExpirationSeconds: &one,
+               Audiences: []string{"api"},
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Pod",
                    APIVersion: "v1",
@@ -289,8 +287,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
    t.Run("expired token", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
-               Audiences:         []string{"api"},
-               ExpirationSeconds: &one,
+               Audiences: []string{"api"},
            },
        }

@@ -303,7 +300,26 @@ func TestServiceAccountTokenCreate(t *testing.T) {
        }

        doTokenReview(t, cs, treq, false)
-       time.Sleep(63 * time.Second)

        // backdate the token
        then := time.Now().Add(-2 * time.Hour)
        sc := &jwt.Claims{
            Subject:   apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name),
            Audience:  jwt.Audience([]string{"api"}),
            IssuedAt:  jwt.NewNumericDate(then),
            NotBefore: jwt.NewNumericDate(then),
            Expiry:    jwt.NewNumericDate(then.Add(time.Duration(60*60) * time.Second)),
        }
        coresa := core.ServiceAccount{
            ObjectMeta: sa.ObjectMeta,
        }
        _, pc := serviceaccount.Claims(coresa, nil, nil, 0, nil)
        tok, err := masterConfig.ExtraConfig.ServiceAccountIssuer.GenerateToken(sc, pc)
        if err != nil {
            t.Fatalf("err signing expired token: %v", err)
        }

        treq.Status.Token = tok
        doTokenReview(t, cs, treq, true)
    })

@@ -342,6 +358,86 @@ func TestServiceAccountTokenCreate(t *testing.T) {

        doTokenReview(t, cs, treq, false)
    })

    t.Run("a token should be invalid after recreating same name pod", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences: []string{"api"},
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Pod",
                    APIVersion: "v1",
                    Name:       pod.Name,
                },
            },
        }

        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()
        originalPod, originalDelPod := createDeletePod(t, cs, pod)
        defer originalDelPod()

        treq.Spec.BoundObjectRef.UID = originalPod.UID
        if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
            t.Fatalf("err: %v", err)
        }

        checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
        checkPayload(t, treq.Status.Token, `["api"]`, "aud")
        checkPayload(t, treq.Status.Token, `"test-pod"`, "kubernetes.io", "pod", "name")
        checkPayload(t, treq.Status.Token, "null", "kubernetes.io", "secret")
        checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
        checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

        doTokenReview(t, cs, treq, false)
        originalDelPod()
        doTokenReview(t, cs, treq, true)

        _, recreateDelPod := createDeletePod(t, cs, pod)
        defer recreateDelPod()

        doTokenReview(t, cs, treq, true)
    })

    t.Run("a token should be invalid after recreating same name secret", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences: []string{"api"},
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Secret",
                    APIVersion: "v1",
                    Name:       secret.Name,
                    UID:        secret.UID,
                },
            },
        }

        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()

        originalSecret, originalDelSecret := createDeleteSecret(t, cs, secret)
        defer originalDelSecret()

        treq.Spec.BoundObjectRef.UID = originalSecret.UID
        if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
            t.Fatalf("err: %v", err)
        }

        checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
        checkPayload(t, treq.Status.Token, `["api"]`, "aud")
        checkPayload(t, treq.Status.Token, `null`, "kubernetes.io", "pod")
        checkPayload(t, treq.Status.Token, `"test-secret"`, "kubernetes.io", "secret", "name")
        checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
        checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

        doTokenReview(t, cs, treq, false)
        originalDelSecret()
        doTokenReview(t, cs, treq, true)

        _, recreateDelSecret := createDeleteSecret(t, cs, secret)
        defer recreateDelSecret()

        doTokenReview(t, cs, treq, true)
    })
}

func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) {
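The checkPayload assertions above compare raw JSON fragments inside the token, which works because a service account token is a compact-serialized JWS whose middle segment is base64url-encoded JSON claims. A self-contained sketch of that decoding; the helper below is illustrative, not the test's actual checkPayload implementation:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "strings"
)

// decodeClaims pulls the claims object out of a compact-serialized JWT.
func decodeClaims(token string) (map[string]interface{}, error) {
    parts := strings.Split(token, ".")
    if len(parts) != 3 {
        return nil, fmt.Errorf("expected 3 JWT segments, got %d", len(parts))
    }
    raw, err := base64.RawURLEncoding.DecodeString(parts[1])
    if err != nil {
        return nil, err
    }
    var claims map[string]interface{}
    err = json.Unmarshal(raw, &claims)
    return claims, err
}

func main() {
    // Build a fake token so the example is self-contained; a real one comes
    // back in treq.Status.Token.
    payload := base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"system:serviceaccount:myns:test-svcacct","aud":["api"]}`))
    claims, err := decodeClaims("header." + payload + ".signature")
    if err != nil {
        panic(err)
    }
    fmt.Println(claims["sub"]) // system:serviceaccount:myns:test-svcacct
}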
vendor/k8s.io/kubernetes/test/integration/benchmark-controller.json (generated, vendored, 2 lines changed)
@@ -17,7 +17,7 @@
      "spec": {
        "containers": [{
          "name": "test-container",
-         "image": "k8s.gcr.io/pause-amd64:3.1"
+         "image": "k8s.gcr.io/pause:3.1"
        }]
      }
    }
vendor/k8s.io/kubernetes/test/integration/client/BUILD (generated, vendored, 3 lines changed)
@@ -18,11 +18,10 @@ go_test(
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/version:go_default_library",
-       "//test/e2e/framework:go_default_library",
        "//test/integration/framework:go_default_library",
+       "//test/utils/image:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
vendor/k8s.io/kubernetes/test/integration/client/client_test.go (generated, vendored, 43 lines changed)
@@ -27,7 +27,6 @@ import (

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
-   "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
@@ -39,17 +38,16 @@ import (
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
-   "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/version"
-   e2e "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/integration/framework"
+   imageutils "k8s.io/kubernetes/test/utils/image"
)

func TestClient(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

-   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    ns := framework.CreateTestingNamespace("client", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
@@ -121,7 +119,7 @@ func TestAtomicPut(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

-   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    ns := framework.CreateTestingNamespace("atomic-put", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
@@ -213,7 +211,7 @@ func TestPatch(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

-   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    ns := framework.CreateTestingNamespace("patch", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
@@ -286,23 +284,6 @@ func TestPatch(t *testing.T) {
        t.Logf("%v", string(jsonObj))
    }

-   obj, err := result.Get()
-   if err != nil {
-       t.Fatal(err)
-   }
-   metadata, err := meta.Accessor(obj)
-   if err != nil {
-       t.Fatal(err)
-   }
-   // this call waits for the resourceVersion to be reached in the cache before returning. We need to do this because
-   // the patch gets its initial object from the storage, and the cache serves that. If it is out of date,
-   // then our initial patch is applied to an old resource version, which conflicts and then the updated object shows
-   // a conflicting diff, which permanently fails the patch. This gives expected stability in the patch without
-   // retrying on an known number of conflicts below in the test.
-   if _, err := c.Core().Pods(ns.Name).Get(name, metav1.GetOptions{ResourceVersion: metadata.GetResourceVersion()}); err != nil {
-       t.Fatal(err)
-   }
-
    return nil
}

@@ -352,7 +333,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

-   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    ns := framework.CreateTestingNamespace("patch-with-create", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
@@ -463,7 +444,7 @@ func TestAPIVersions(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

-   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    clientVersion := c.Core().RESTClient().APIVersion().String()
    g, err := c.Discovery().ServerGroups()
@@ -488,7 +469,7 @@ func TestSingleWatch(t *testing.T) {
    ns := framework.CreateTestingNamespace("single-watch", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    mkEvent := func(i int) *v1.Event {
        name := fmt.Sprintf("event-%v", i)
@@ -575,7 +556,7 @@ func TestMultiWatch(t *testing.T) {
    ns := framework.CreateTestingNamespace("multi-watch", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    dummyEvent := func(i int) *v1.Event {
        name := fmt.Sprintf("unrelated-%v", i)
@@ -612,7 +593,7 @@ func TestMultiWatch(t *testing.T) {
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name:  "pause",
-               Image: e2e.GetPauseImageName(client),
+               Image: imageutils.GetPauseImageName(),
            }},
        },
    })
@@ -718,7 +699,7 @@ func TestMultiWatch(t *testing.T) {
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name:  "nothing",
-               Image: e2e.GetPauseImageName(client),
+               Image: imageutils.GetPauseImageName(),
            }},
        },
    })
@@ -749,7 +730,7 @@ func TestMultiWatch(t *testing.T) {
        if err != nil {
            panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
        }
-       pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
+       pod.Spec.Containers[0].Image = imageutils.GetPauseImageName()
        sentTimes <- timePair{time.Now(), name}
        if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
            panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
@@ -831,7 +812,7 @@ func TestSelfLinkOnNamespace(t *testing.T) {
    ns := framework.CreateTestingNamespace("selflink", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    runSelfLinkTestOnNamespace(t, c, ns.Name)
}
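Throughout this file the testapi.Groups[...].GroupVersion() lookup is replaced by a literal schema.GroupVersion, removing the testapi indirection. A sketch of the resulting client construction; the host value is a placeholder:

package main

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
)

func newCoreV1Client(host string) clientset.Interface {
    // The core ("legacy") API group has the empty string as its group name,
    // so this pins the client to core/v1 without going through testapi.
    return clientset.NewForConfigOrDie(&restclient.Config{
        Host: host,
        ContentConfig: restclient.ContentConfig{
            GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
        },
    })
}

func main() {
    _ = newCoreV1Client("http://127.0.0.1:8080") // hypothetical apiserver address
}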
vendor/k8s.io/kubernetes/test/integration/client/dynamic_client_test.go (generated, vendored, 34 lines changed)
@@ -24,6 +24,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
@@ -38,36 +39,19 @@ func TestDynamicClient(t *testing.T) {
    ns := framework.CreateTestingNamespace("dynamic-client", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   gv := testapi.Groups[v1.GroupName].GroupVersion()
+   gv := &schema.GroupVersion{Group: "", Version: "v1"}
    config := &restclient.Config{
        Host:          s.URL,
        ContentConfig: restclient.ContentConfig{GroupVersion: gv},
    }

    client := clientset.NewForConfigOrDie(config)
-   dynamicClient, err := dynamic.NewClient(config)
-   _ = dynamicClient
+   dynamicClient, err := dynamic.NewForConfig(config)
    if err != nil {
        t.Fatalf("unexpected error creating dynamic client: %v", err)
    }

-   // Find the Pod resource
-   resources, err := client.Discovery().ServerResourcesForGroupVersion(gv.String())
-   if err != nil {
-       t.Fatalf("unexpected error listing resources: %v", err)
-   }
-
-   var resource metav1.APIResource
-   for _, r := range resources.APIResources {
-       if r.Kind == "Pod" {
-           resource = r
-           break
-       }
-   }
-
-   if len(resource.Name) == 0 {
-       t.Fatalf("could not find the pod resource in group/version %q", gv.String())
-   }
+   resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}

    // Create a Pod with the normal client
    pod := &v1.Pod{
@@ -90,11 +74,7 @@ func TestDynamicClient(t *testing.T) {
    }

    // check dynamic list
-   obj, err := dynamicClient.Resource(&resource, ns.Name).List(metav1.ListOptions{})
-   unstructuredList, ok := obj.(*unstructured.UnstructuredList)
-   if !ok {
-       t.Fatalf("expected *unstructured.UnstructuredList, got %#v", obj)
-   }
+   unstructuredList, err := dynamicClient.Resource(resource).Namespace(ns.Name).List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("unexpected error when listing pods: %v", err)
    }
@@ -113,7 +93,7 @@ func TestDynamicClient(t *testing.T) {
    }

    // check dynamic get
-   unstruct, err := dynamicClient.Resource(&resource, ns.Name).Get(actual.Name, metav1.GetOptions{})
+   unstruct, err := dynamicClient.Resource(resource).Namespace(ns.Name).Get(actual.Name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("unexpected error when getting pod %q: %v", actual.Name, err)
    }
@@ -128,7 +108,7 @@ func TestDynamicClient(t *testing.T) {
    }

    // delete the pod dynamically
-   err = dynamicClient.Resource(&resource, ns.Name).Delete(actual.Name, nil)
+   err = dynamicClient.Resource(resource).Namespace(ns.Name).Delete(actual.Name, nil)
    if err != nil {
        t.Fatalf("unexpected error when deleting pod: %v", err)
    }
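The dynamic client API changed shape here: dynamic.NewClient plus a discovery scan for the matching APIResource gives way to dynamic.NewForConfig, with resources addressed directly by GroupVersionResource and scoped through a Namespace call. A sketch of the new style against a live cluster, assuming a default kubeconfig and the pre-context client-go vendored here:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }

    // New-style construction: one dynamic client serves every resource.
    dyn, err := dynamic.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    // Resources are named by GroupVersionResource up front, so no discovery
    // round-trip is needed to locate the "pods" resource.
    pods := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
    list, err := dyn.Resource(pods).Namespace("default").List(metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, item := range list.Items {
        fmt.Println(item.GetName())
    }
}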
vendor/k8s.io/kubernetes/test/integration/configmap/BUILD (generated, vendored, 2 lines changed)
@@ -14,11 +14,11 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
-       "//pkg/api/testapi:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+       "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
vendor/k8s.io/kubernetes/test/integration/configmap/configmap_test.go (generated, vendored, 4 lines changed)
@@ -23,9 +23,9 @@ import (

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
-   "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/test/integration"
    "k8s.io/kubernetes/test/integration/framework"
)
@@ -35,7 +35,7 @@ func TestConfigMap(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(nil)
    defer closeFn()

-   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+   client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    ns := framework.CreateTestingNamespace("config-map", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)
vendor/k8s.io/kubernetes/test/integration/daemonset/BUILD (generated, vendored, 10 lines changed)
@@ -14,22 +14,32 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/daemon:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

480
vendor/k8s.io/kubernetes/test/integration/daemonset/daemonset_test.go
generated
vendored
@ -24,22 +24,36 @@ import (

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/test/integration/framework"
)

var zero = int64(0)

func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, server, closeFn := framework.RunAMaster(masterConfig)
@ -66,6 +80,62 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS
return server, closeFn, dc, informers, clientSet
}

func setupScheduler(
t *testing.T,
cs clientset.Interface,
informerFactory informers.SharedInformerFactory,
stopCh chan struct{},
) {
// If ScheduleDaemonSetPods is disabled, do not start scheduler.
if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
return
}

schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
cs,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true,
false,
)

schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}

schedulerConfig.StopEverything = stopCh

eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
legacyscheme.Scheme,
v1.EventSource{Component: v1.DefaultSchedulerName},
)
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
Interface: cs.CoreV1().Events(""),
})

sched, err := scheduler.NewFromConfigurator(
&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
if err != nil {
t.Fatalf("error creating scheduler: %v", err)
}

algorithmprovider.ApplyFeatureGates()

go sched.Run()
}

func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
@ -92,13 +162,55 @@ func newDaemonSet(name, namespace string) *apps.DaemonSet {
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "foo", Image: "bar"}},
Containers: []v1.Container{{Name: "foo", Image: "bar"}},
TerminationGracePeriodSeconds: &zero,
},
},
},
}
}

func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) {
ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
return
}

// We set the nodeSelector to a random label. This label is nearly guaranteed
// to not be set on any node so the DaemonSetController will start deleting
// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
// the DaemonSet.
ds.Spec.Template.Spec.NodeSelector = map[string]string{
string(uuid.NewUUID()): string(uuid.NewUUID()),
}
// force update to avoid version conflict
ds.ResourceVersion = ""

if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(ds); err != nil {
t.Errorf("Failed to update DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
return
}

// Wait for the daemon set controller to kill all the daemon pods.
if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
}); err != nil {
t.Errorf("Failed to kill the pods of DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
return
}

falseVar := false
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions); err != nil {
t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
}
}

func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
one := intstr.FromInt(1)
return &apps.DaemonSetUpdateStrategy{
@ -117,6 +229,12 @@ func updateStrategies() []*apps.DaemonSetUpdateStrategy {
return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
}

func featureGates() []utilfeature.Feature {
return []utilfeature.Feature{
features.ScheduleDaemonSetPods,
}
}

func allocatableResources(memory, cpu string) v1.ResourceList {
return v1.ResourceList{
v1.ResourceMemory: resource.MustParse(memory),
@ -140,6 +258,7 @@ func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
},
},
},
TerminationGracePeriodSeconds: &zero,
}
}

@ -196,7 +315,7 @@ func validateDaemonSetPodsAndMarkReady(
t.Errorf("controllerRef.Controller is not set to true")
}

if !podutil.IsPodReady(pod) {
if !podutil.IsPodReady(pod) && len(pod.Spec.NodeName) != 0 {
podCopy := pod.DeepCopy()
podCopy.Status = v1.PodStatus{
Phase: v1.PodRunning,
@ -215,10 +334,47 @@ func validateDaemonSetPodsAndMarkReady(
}
}

// podUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status.
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
return cond != nil && cond.Status == v1.ConditionFalse &&
cond.Reason == v1.PodReasonUnschedulable, nil
}
}

// waitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
return wait.Poll(100*time.Millisecond, timeout, podUnschedulable(cs, pod.Namespace, pod.Name))
}

// waitForPodUnschedulable waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the timeout duration (10 seconds).
func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 10*time.Second)
}

// waitForPodsCreated waits until the given number of pods has been created.
func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
return wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
return len(objects) == num, nil
})
}

func validateDaemonSetStatus(
dsClient appstyped.DaemonSetInterface,
dsName string,
dsNamespace string,
expectedNumberReady int32,
t *testing.T) {
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
@ -257,71 +413,181 @@ func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *tes
}
}

func TestOneNodeDaemonLaunchesPod(t *testing.T) {
for _, strategy := range updateStrategies() {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)
func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
for _, fg := range featureGates() {
func() {
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
}()

dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
stopCh := make(chan struct{})
informers.Start(stopCh)
go dc.Run(5, stopCh)

ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
_, err = nodeClient.Create(newNode("single-node", nil))
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}

validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 1, t)

close(stopCh)
for _, f := range []bool{true, false} {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
}
}()
}
}

func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {
for _, strategy := range updateStrategies() {
t.Run(fmt.Sprintf("%s (%v)", t.Name(), strategy),
func(tt *testing.T) { tf(tt, strategy) })
}
}

func TestOneNodeDaemonLaunchesPod(t *testing.T) {
forEachFeatureGate(t, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()

stopCh := make(chan struct{})
defer close(stopCh)

informers.Start(stopCh)
go dc.Run(5, stopCh)

// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)

ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)

_, err = nodeClient.Create(newNode("single-node", nil))
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}

validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
})
})
}

func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
for _, strategy := range updateStrategies() {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)
forEachFeatureGate(t, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
stopCh := make(chan struct{})
informers.Start(stopCh)
go dc.Run(5, stopCh)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()

ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
addNodes(nodeClient, 0, 5, nil, t)
stopCh := make(chan struct{})
defer close(stopCh)

validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t)
validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 5, t)
informers.Start(stopCh)
go dc.Run(5, stopCh)

close(stopCh)
}
// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)

ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)

addNodes(nodeClient, 0, 5, nil, t)

validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t)
validateDaemonSetStatus(dsClient, ds.Name, 5, t)
})
})
}

func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
forEachFeatureGate(t, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()

stopCh := make(chan struct{})
defer close(stopCh)

informers.Start(stopCh)
go dc.Run(5, stopCh)

// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)

ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy

ds.Spec.Template.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "zone",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test"},
},
},
},
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"node-1"},
},
},
},
},
},
},
}

_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)

addNodes(nodeClient, 0, 2, nil, t)
// Two nodes with labels
addNodes(nodeClient, 2, 2, map[string]string{
"zone": "test",
}, t)
addNodes(nodeClient, 4, 2, nil, t)

validateDaemonSetPodsAndMarkReady(podClient, podInformer, 3, t)
validateDaemonSetStatus(dsClient, ds.Name, 3, t)
})
})
}

func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
for _, strategy := range updateStrategies() {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
@ -331,16 +597,25 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()

stopCh := make(chan struct{})
defer close(stopCh)

informers.Start(stopCh)
go dc.Run(5, stopCh)

// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)

ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}

defer cleanupDaemonSets(t, clientset, ds)

node := newNode("single-node", nil)
node.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionFalse},
@ -351,14 +626,12 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
}

validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 1, t)

close(stopCh)
}
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
})
}

func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
for _, strategy := range updateStrategies() {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
@ -366,8 +639,11 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {

dsClient := clientset.AppsV1().DaemonSets(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
eventClient := corev1typed.New(clientset.CoreV1().RESTClient()).Events(ns.Namespace)
eventClient := clientset.CoreV1().Events(ns.Namespace)

stopCh := make(chan struct{})
defer close(stopCh)

informers.Start(stopCh)
go dc.Run(5, stopCh)

@ -378,6 +654,8 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)

node := newNode("node-with-limited-memory", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
_, err = nodeClient.Create(node)
@ -386,7 +664,79 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
}

validateFailedPlacementEvent(eventClient, t)

close(stopCh)
}
})
}

// TestInsufficientCapacityNodeDaemonSetCreateButNotLaunchPod tests that when "ScheduleDaemonSetPods"
// feature is enabled, the DaemonSet should create Pods for all the nodes regardless of available resource
// on the nodes, and kube-scheduler should not schedule Pods onto the nodes with insufficient resource.
func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.ScheduleDaemonSetPods, enabled))
}()

utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true))

forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
podInformer := informers.Core().V1().Pods().Informer()
nodeClient := clientset.CoreV1().Nodes()
stopCh := make(chan struct{})
defer close(stopCh)

informers.Start(stopCh)
go dc.Run(5, stopCh)

// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)

ds := newDaemonSet("foo", ns.Name)
ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m")
ds.Spec.UpdateStrategy = *strategy
ds, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}

defer cleanupDaemonSets(t, clientset, ds)

node := newNode("node-with-limited-memory", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
_, err = nodeClient.Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}

if err := waitForPodsCreated(podInformer, 1); err != nil {
t.Errorf("Failed to wait for pods created: %v", err)
}

objects := podInformer.GetIndexer().List()
for _, object := range objects {
pod := object.(*v1.Pod)
if err := waitForPodUnschedulable(clientset, pod); err != nil {
t.Errorf("Failed to wait for unschedulable status of pod %+v", pod)
}
}

node1 := newNode("node-with-enough-memory", nil)
node1.Status.Allocatable = allocatableResources("200M", "2000m")
_, err = nodeClient.Create(node1)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}

// When ScheduleDaemonSetPods is enabled, 2 pods are created, but only one
// of the two Pods is scheduled by the default scheduler.
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
})
}

2
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/BUILD
generated
vendored
@ -17,13 +17,13 @@ go_test(
"integration",
],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],

@ -21,9 +21,9 @@ import (

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
@ -33,12 +33,11 @@ import (
func TestAdmission(t *testing.T) {
masterConfig := framework.NewMasterConfig()
masterConfig.GenericConfig.EnableProfiling = true
masterConfig.GenericConfig.EnableMetrics = true
masterConfig.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds()
_, s, closeFn := framework.RunAMaster(masterConfig)
defer closeFn()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("default-toleration-seconds", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

3
vendor/k8s.io/kubernetes/test/integration/deployment/BUILD
generated
vendored
@ -19,8 +19,8 @@ go_test(
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
@ -41,6 +41,7 @@ go_library(
"//pkg/util/metrics:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

293
vendor/k8s.io/kubernetes/test/integration/deployment/deployment_test.go
generated
vendored
@ -22,8 +22,8 @@ import (
"strings"
"testing"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
@ -47,7 +47,7 @@ func TestNewDeployment(t *testing.T) {

tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -81,6 +81,32 @@ func TestNewDeployment(t *testing.T) {
if newRS.Annotations[v1.LastAppliedConfigAnnotation] != "" {
t.Errorf("expected new ReplicaSet last-applied annotation not copied from Deployment %s", tester.deployment.Name)
}

// New RS should contain pod-template-hash in its selector, label, and template label
rsHash, err := checkRSHashLabels(newRS)
if err != nil {
t.Error(err)
}

// All pods targeted by the deployment should contain pod-template-hash in their labels
selector, err := metav1.LabelSelectorAsSelector(tester.deployment.Spec.Selector)
if err != nil {
t.Fatalf("failed to parse deployment %s selector: %v", name, err)
}
pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
t.Fatalf("failed to list pods of deployment %s: %v", name, err)
}
if len(pods.Items) != int(replicas) {
t.Errorf("expected %d pods, got %d pods", replicas, len(pods.Items))
}
podHash, err := checkPodsHashLabel(pods)
if err != nil {
t.Error(err)
}
if rsHash != podHash {
t.Errorf("found mismatching pod-template-hash value: rs hash = %s whereas pod hash = %s", rsHash, podHash)
}
}

// Deployments should support roll out, roll back, and roll over
@ -102,14 +128,14 @@ func TestDeploymentRollingUpdate(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.MinReadySeconds = 4
quarter := intstr.FromString("25%")
tester.deployment.Spec.Strategy.RollingUpdate = &v1beta1.RollingUpdateDeployment{
tester.deployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: &quarter,
MaxSurge: &quarter,
}

// Create a deployment.
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -126,7 +152,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if oriImage == image {
t.Fatalf("bad test setup, deployment %s roll out with the same image", tester.deployment.Name)
}
imageFn := func(update *v1beta1.Deployment) {
imageFn := func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
}
tester.deployment, err = tester.updateDeployment(imageFn)
@ -160,7 +186,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {

// 3. Roll over a deployment before the previous rolling update finishes.
image = "dont-finish"
imageFn = func(update *v1beta1.Deployment) {
imageFn = func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
}
tester.deployment, err = tester.updateDeployment(imageFn)
@ -173,7 +199,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
// We don't mark pods as ready so that rollout won't finish.
// Before the rollout finishes, trigger another rollout.
image = "rollover"
imageFn = func(update *v1beta1.Deployment) {
imageFn = func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
}
tester.deployment, err = tester.updateDeployment(imageFn)
@ -186,7 +212,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
@ -206,13 +232,18 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t)

tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, int32(20))}
deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
var err error
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create extensions/v1beta1 deployment %s: %v", tester.deployment.Name, err)
t.Fatalf("failed to create apps/v1 deployment %s: %v", tester.deployment.Name, err)
}

// test to ensure extensions/v1beta1 selector is mutable
newSelectorLabels := map[string]string{"name_extensions_v1beta1": "test_extensions_v1beta1"}
deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get extensions/v1beta deployment %s: %v", name, err)
}
deploymentExtensionsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentExtensionsV1beta1.Spec.Template.Labels = newSelectorLabels
updatedDeploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Update(deploymentExtensionsV1beta1)
@ -257,6 +288,22 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}

// test to ensure apps/v1 selector is immutable
deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(updatedDeploymentAppsV1beta1.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1 deployment %s: %v", updatedDeploymentAppsV1beta1.Name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1": "test_apps_v1"}
deploymentAppsV1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1.Spec.Template.Labels = newSelectorLabels
_, err = c.AppsV1().Deployments(ns.Name).Update(deploymentAppsV1)
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 deployment %s", deploymentAppsV1.Name)
}
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
}

// Paused deployment should not start new rollout
@ -274,7 +321,7 @@ func TestPausedDeployment(t *testing.T) {
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps

var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -332,7 +379,7 @@ func TestPausedDeployment(t *testing.T) {

// Update the deployment template
newTGPS := int64(0)
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.TerminationGracePeriodSeconds = &newTGPS
})
if err != nil {
@ -349,7 +396,7 @@ func TestPausedDeployment(t *testing.T) {
t.Fatal(err)
}

_, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
_, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
@ -375,7 +422,7 @@ func TestScalePausedDeployment(t *testing.T) {
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps

var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -416,7 +463,7 @@ func TestScalePausedDeployment(t *testing.T) {

// Scale the paused deployment.
newReplicas := int32(10)
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
@ -456,7 +503,7 @@ func TestDeploymentHashCollision(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}

var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -474,14 +521,14 @@ func TestDeploymentHashCollision(t *testing.T) {
}

// Mock a hash collision
newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed getting new replicaset of deployment %s: %v", tester.deployment.Name, err)
}
if newRS == nil {
t.Fatalf("unable to find new replicaset of deployment %s", tester.deployment.Name)
}
_, err = tester.updateReplicaSet(newRS.Name, func(update *v1beta1.ReplicaSet) {
_, err = tester.updateReplicaSet(newRS.Name, func(update *apps.ReplicaSet) {
*update.Spec.Template.Spec.TerminationGracePeriodSeconds = int64(5)
})
if err != nil {
@ -490,7 +537,7 @@ func TestDeploymentHashCollision(t *testing.T) {

// Expect deployment collision counter to increment
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
d, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})
d, err := c.AppsV1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -520,7 +567,7 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
rs.Annotations = make(map[string]string)
rs.Annotations["make"] = "difference"
rs.Spec.Template.Spec.Containers[0].Image = "different-image"
_, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Create(rs)
_, err := c.AppsV1().ReplicaSets(ns.Name).Create(rs)
if err != nil {
t.Fatalf("failed to create replicaset %s: %v", rsName, err)
}
@ -528,9 +575,13 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image
// Set absolute rollout limits (defaults changed to percentages)
max := intstr.FromInt(1)
tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = &max
tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = &max

// Create a deployment which has a different template than the replica set created above.
if tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment); err != nil {
if tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment); err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}

@ -569,7 +620,7 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {

// 2. Update the deployment to revision 2.
updatedImage := "update"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedImage
update.Spec.Template.Spec.Containers[0].Image = updatedImage
})
@ -618,10 +669,10 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
}
}

func checkRSHashLabels(rs *v1beta1.ReplicaSet) (string, error) {
hash := rs.Labels[v1beta1.DefaultDeploymentUniqueLabelKey]
selectorHash := rs.Spec.Selector.MatchLabels[v1beta1.DefaultDeploymentUniqueLabelKey]
templateLabelHash := rs.Spec.Template.Labels[v1beta1.DefaultDeploymentUniqueLabelKey]
func checkRSHashLabels(rs *apps.ReplicaSet) (string, error) {
hash := rs.Labels[apps.DefaultDeploymentUniqueLabelKey]
selectorHash := rs.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey]
templateLabelHash := rs.Spec.Template.Labels[apps.DefaultDeploymentUniqueLabelKey]

if hash != selectorHash || selectorHash != templateLabelHash {
return "", fmt.Errorf("mismatching hash value found in replicaset %s: %#v", rs.Name, rs)
@ -639,7 +690,7 @@ func checkPodsHashLabel(pods *v1.PodList) (string, error) {
}
var hash string
for _, pod := range pods.Items {
podHash := pod.Labels[v1beta1.DefaultDeploymentUniqueLabelKey]
podHash := pod.Labels[apps.DefaultDeploymentUniqueLabelKey]
if len(podHash) == 0 {
return "", fmt.Errorf("found pod %s missing pod-template-hash label: %#v", pod.Name, pods)
}
@ -654,104 +705,6 @@ func checkPodsHashLabel(pods *v1.PodList) (string, error) {
return hash, nil
}

// Deployment should label adopted ReplicaSets and Pods.
func TestDeploymentLabelAdopted(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-adopted-deployment"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

// Create a RS to be adopted by the deployment.
rsName := "test-adopted-controller"
replicas := int32(1)
rs := newReplicaSet(rsName, ns.Name, replicas)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Create(rs)
if err != nil {
t.Fatalf("failed to create replicaset %s: %v", rsName, err)
}
// Mark RS pods as ready.
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
if err != nil {
t.Fatalf("failed to parse replicaset %s selector: %v", rsName, err)
}
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return false, err
}
if len(pods.Items) != int(replicas) {
return false, nil
}
for _, pod := range pods.Items {
if err = markPodReady(c, ns.Name, &pod); err != nil {
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("failed to mark pods replicaset %s as ready: %v", rsName, err)
}

// Create a Deployment to adopt the old rs.
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
if tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment); err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}

// Wait for the Deployment to be updated to revision 1
if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}

// The RS and pods should be relabeled after the Deployment finishes adopting it and completes.
if err := tester.waitForDeploymentComplete(); err != nil {
t.Fatal(err)
}

// There should be no old RSes (overlapping RS)
oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(tester.deployment, c.ExtensionsV1beta1())
if err != nil {
t.Fatalf("failed to get all replicasets owned by deployment %s: %v", name, err)
}
if len(oldRSs) != 0 || len(allOldRSs) != 0 {
t.Errorf("expected deployment to have no old replicasets, got %d old replicasets", len(allOldRSs))
}

// New RS should be relabeled, i.e. contain pod-template-hash in its selector, label, and template label
rsHash, err := checkRSHashLabels(newRS)
if err != nil {
t.Error(err)
}

// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
selector, err = metav1.LabelSelectorAsSelector(tester.deployment.Spec.Selector)
if err != nil {
t.Fatalf("failed to parse deployment %s selector: %v", name, err)
}
pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
t.Fatalf("failed to list pods of deployment %s: %v", name, err)
}
if len(pods.Items) != int(replicas) {
t.Errorf("expected %d pods, got %d pods", replicas, len(pods.Items))
}
podHash, err := checkPodsHashLabel(pods)
if err != nil {
t.Error(err)
}
if rsHash != podHash {
t.Errorf("found mismatching pod-template-hash value: rs hash = %s whereas pod hash = %s", rsHash, podHash)
}
}

// Deployment should have a timeout condition when it fails to progress after given deadline.
func TestFailedDeployment(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
@ -766,7 +719,7 @@ func TestFailedDeployment(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
tester.deployment.Spec.ProgressDeadlineSeconds = &three
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -784,7 +737,7 @@ func TestFailedDeployment(t *testing.T) {

// Pods are not marked as Ready, therefore the deployment progress will eventually timeout after progressDeadlineSeconds has passed.
// Wait for the deployment to have a progress timeout condition.
if err = tester.waitForDeploymentWithCondition(deploymentutil.TimedOutReason, v1beta1.DeploymentProgressing); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.TimedOutReason, apps.DeploymentProgressing); err != nil {
t.Fatal(err)
}

@ -794,7 +747,7 @@ func TestFailedDeployment(t *testing.T) {
}

// Wait for the deployment to have a progress complete condition.
if err = tester.waitForDeploymentWithCondition(deploymentutil.NewRSAvailableReason, v1beta1.DeploymentProgressing); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing); err != nil {
t.Fatal(err)
}
}
@ -822,9 +775,9 @@ func TestOverlappingDeployments(t *testing.T) {

// Create 2 deployments with overlapping selectors
var err error
var rss []*v1beta1.ReplicaSet
var rss []*apps.ReplicaSet
for _, tester := range testers {
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
dname := tester.deployment.Name
if err != nil {
t.Fatalf("failed to create deployment %q: %v", dname, err)
@ -856,7 +809,7 @@ func TestOverlappingDeployments(t *testing.T) {

// Scale only the first deployment by 1
newReplicas := replicas + 1
testers[0].deployment, err = testers[0].updateDeployment(func(update *v1beta1.Deployment) {
testers[0].deployment, err = testers[0].updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
@ -870,7 +823,7 @@ func TestOverlappingDeployments(t *testing.T) {

// Verify replicaset of both deployments has updated number of replicas
for i, tester := range testers {
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", rss[i].Name, err)
}
@ -900,7 +853,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", name, err)
}
@ -919,7 +872,7 @@ func TestScaledRolloutDeployment(t *testing.T) {

// Update the deployment with another new image but do not mark the pods as ready to block new replicaset
fakeImage2 := "fakeimage2"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = fakeImage2
})
if err != nil {
@ -930,7 +883,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}

// Verify the deployment has minimum available replicas after 2nd rollout
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get deployment %q: %v", name, err)
}
@ -940,7 +893,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}

// Wait for old replicaset of 1st rollout to have desired replicas
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", firstRS.Name, err)
}
@ -960,7 +913,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
// Scale up the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
newReplicas := int32(20)
fakeImage3 := "fakeimage3"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = fakeImage3
})
@ -975,13 +928,13 @@ func TestScaledRolloutDeployment(t *testing.T) {
}

// Verify every replicaset has correct desiredReplicas annotation after 3rd rollout
thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed getting new revision 3 replicaset for deployment %q: %v", name, err)
}
rss := []*v1beta1.ReplicaSet{firstRS, secondRS, thirdRS}
rss := []*apps.ReplicaSet{firstRS, secondRS, thirdRS}
for _, curRS := range rss {
curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
}
@ -996,7 +949,7 @@ func TestScaledRolloutDeployment(t *testing.T) {

// Update the deployment with another new image but do not mark the pods as ready to block new replicaset
fakeImage4 := "fakeimage4"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = fakeImage4
})
if err != nil {
@ -1007,7 +960,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}

// Verify the deployment has minimum available replicas after 4th rollout
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get deployment %q: %v", name, err)
}
@ -1017,7 +970,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}

// Wait for old replicaset of 3rd rollout to have desired replicas
thirdRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
thirdRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", thirdRS.Name, err)
}
@ -1037,7 +990,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
// Scale down the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
newReplicas = int32(5)
fakeImage5 := "fakeimage5"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = fakeImage5
})
@ -1052,13 +1005,13 @@ func TestScaledRolloutDeployment(t *testing.T) {
}

// Verify every replicaset has correct desiredReplicas annotation after 5th rollout
fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed getting new revision 5 replicaset for deployment %q: %v", name, err)
}
rss = []*v1beta1.ReplicaSet{thirdRS, fourthRS, fifthRS}
rss = []*apps.ReplicaSet{thirdRS, fourthRS, fifthRS}
for _, curRS := range rss {
curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
}
@ -1082,10 +1035,10 @@ func TestSpecReplicasChange(t *testing.T) {
deploymentName := "deployment"
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
tester.deployment.Spec.Strategy.Type = v1beta1.RecreateDeploymentStrategyType
tester.deployment.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
tester.deployment.Spec.Strategy.RollingUpdate = nil
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -1111,7 +1064,7 @@ func TestSpecReplicasChange(t *testing.T) {
// Add a template annotation change to test deployment's status does update
// without .spec.replicas change
var oldGeneration int64
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
oldGeneration = update.Generation
update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4)
})
@ -1140,8 +1093,10 @@ func TestDeploymentAvailableCondition(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
// Assign a high value to the deployment's minReadySeconds
tester.deployment.Spec.MinReadySeconds = 3600
// progressDeadlineSeconds must be greater than minReadySeconds
|
||||
tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(7200)
|
||||
var err error
|
||||
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
|
||||
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
|
||||
}
|
||||
@ -1159,7 +1114,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
|
||||
}
|
||||
|
||||
// Wait for the deployment to have MinimumReplicasUnavailable reason because the pods are not marked as ready
|
||||
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
|
||||
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, apps.DeploymentAvailable); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -1179,7 +1134,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
|
||||
}
|
||||
|
||||
// Wait for the deployment to still have MinimumReplicasUnavailable reason within minReadySeconds period
|
||||
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
|
||||
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, apps.DeploymentAvailable); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -1189,7 +1144,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
|
||||
}
|
||||
|
||||
// Update the deployment's minReadySeconds to a small value
|
||||
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
|
||||
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
|
||||
update.Spec.MinReadySeconds = 1
|
||||
})
|
||||
if err != nil {
|
||||
@ -1202,7 +1157,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
|
||||
}
|
||||
|
||||
// Wait for the deployment to have MinimumReplicasAvailable reason after minReadySeconds period
|
||||
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, v1beta1.DeploymentAvailable); err != nil {
|
||||
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, apps.DeploymentAvailable); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -1213,10 +1168,10 @@ func TestDeploymentAvailableCondition(t *testing.T) {
|
||||
}
|
||||
|
||||
// Wait for deployment to automatically patch incorrect ControllerRef of RS
|
||||
func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *v1beta1.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
|
||||
func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *apps.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
|
||||
ns := rs.Namespace
|
||||
rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns)
|
||||
rs, err := tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
|
||||
rsClient := tester.c.AppsV1().ReplicaSets(ns)
|
||||
rs, err := tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) {
|
||||
update.OwnerReferences = []metav1.OwnerReference{*ownerReference}
|
||||
})
|
||||
if err != nil {
|
||||
@ -1258,7 +1213,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
|
||||
replicas := int32(1)
|
||||
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
|
||||
var err error
|
||||
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
|
||||
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
|
||||
}
|
||||
@ -1281,7 +1236,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get replicaset of the deployment
|
||||
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
|
||||
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
|
||||
}
|
||||
@ -1305,7 +1260,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
|
||||
func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, replicas int32) {
|
||||
ns := tester.deployment.Namespace
|
||||
deploymentName := tester.deployment.Name
|
||||
deploymentClient := tester.c.ExtensionsV1beta1().Deployments(ns)
|
||||
deploymentClient := tester.c.AppsV1().Deployments(ns)
|
||||
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
|
||||
@ -1352,7 +1307,7 @@ func TestDeploymentScaleSubresource(t *testing.T) {
|
||||
replicas := int32(2)
|
||||
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
|
||||
var err error
|
||||
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
|
||||
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
|
||||
}
|
||||
@ -1396,7 +1351,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||
replicas := int32(1)
|
||||
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
|
||||
var err error
|
||||
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
|
||||
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
|
||||
}
|
||||
@ -1421,7 +1376,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||
// Orphaning: deployment should remove OwnerReference from a RS when the RS's labels change to not match its labels
|
||||
|
||||
// Get replicaset of the deployment
|
||||
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
|
||||
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
|
||||
}
|
||||
@ -1440,7 +1395,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||
|
||||
// Change the replicaset's labels to not match the deployment's labels
|
||||
labelMap := map[string]string{"new-name": "new-test"}
|
||||
rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
|
||||
rs, err = tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) {
|
||||
update.Labels = labelMap
|
||||
})
|
||||
if err != nil {
|
||||
@ -1448,7 +1403,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||
}
|
||||
|
||||
// Wait for the controllerRef of the replicaset to become nil
|
||||
rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns.Name)
|
||||
rsClient := tester.c.AppsV1().ReplicaSets(ns.Name)
|
||||
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
|
||||
rs, err = rsClient.Get(rs.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -1462,9 +1417,9 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||
// Wait for the deployment to create a new replicaset
|
||||
// This will trigger collision avoidance due to deterministic nature of replicaset name
|
||||
// i.e., the new replicaset will have a name with different hash to preserve name uniqueness
|
||||
var newRS *v1beta1.ReplicaSet
|
||||
var newRS *apps.ReplicaSet
|
||||
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
|
||||
newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
|
||||
newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get new replicaset of deployment %q after orphaning: %v", deploymentName, err)
|
||||
}
|
||||
@ -1479,7 +1434,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||
// Adoption: deployment should add controllerRef to a RS when the RS's labels change to match its labels
|
||||
|
||||
// Change the old replicaset's labels to match the deployment's labels
|
||||
rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
|
||||
rs, err = tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) {
|
||||
update.Labels = testLabels()
|
||||
})
|
||||
if err != nil {
|
||||
|
73 vendor/k8s.io/kubernetes/test/integration/deployment/util.go generated vendored
@ -23,8 +23,9 @@ import (
	"testing"
	"time"

+	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
-	"k8s.io/api/extensions/v1beta1"
+	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
@ -48,18 +49,18 @@ const (
	fakeImage = "fakeimage"
)

-var pauseFn = func(update *v1beta1.Deployment) {
+var pauseFn = func(update *apps.Deployment) {
	update.Spec.Paused = true
}

-var resumeFn = func(update *v1beta1.Deployment) {
+var resumeFn = func(update *apps.Deployment) {
	update.Spec.Paused = false
}

type deploymentTester struct {
	t          *testing.T
	c          clientset.Interface
-	deployment *v1beta1.Deployment
+	deployment *apps.Deployment
}

func testLabels() map[string]string {
@ -67,22 +68,22 @@ func testLabels() map[string]string {
}

// newDeployment returns a RollingUpdate Deployment with a fake container image
-func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
-	return &v1beta1.Deployment{
+func newDeployment(name, ns string, replicas int32) *apps.Deployment {
+	return &apps.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
-			APIVersion: "extensions/v1beta1",
+			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
-		Spec: v1beta1.DeploymentSpec{
+		Spec: apps.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
-			Strategy: v1beta1.DeploymentStrategy{
-				Type:          v1beta1.RollingUpdateDeploymentStrategyType,
-				RollingUpdate: new(v1beta1.RollingUpdateDeployment),
+			Strategy: apps.DeploymentStrategy{
+				Type:          apps.RollingUpdateDeploymentStrategyType,
+				RollingUpdate: new(apps.RollingUpdateDeployment),
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
@ -101,8 +102,8 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
	}
}

-func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
-	return &v1beta1.ReplicaSet{
+func newReplicaSet(name, ns string, replicas int32) *apps.ReplicaSet {
+	return &apps.ReplicaSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicaSet",
			APIVersion: "extensions/v1beta1",
@ -110,8 +111,9 @@ func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
+			Labels:    testLabels(),
		},
-		Spec: v1beta1.ReplicaSetSpec{
+		Spec: apps.ReplicaSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: testLabels(),
			},
@ -133,11 +135,11 @@ func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
	}
}

-func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback {
-	return &v1beta1.DeploymentRollback{
+func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
+	return &extensions.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
-		RollbackTo:         v1beta1.RollbackConfig{Revision: revision},
+		RollbackTo:         extensions.RollbackConfig{Revision: revision},
	}
}

@ -156,8 +158,8 @@ func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R

	metrics.UnregisterMetricAndUntrackRateLimiterUsage("deployment_controller")
	dc, err := deployment.NewDeploymentController(
-		informers.Extensions().V1beta1().Deployments(),
-		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Apps().V1().Deployments(),
+		informers.Apps().V1().ReplicaSets(),
		informers.Core().V1().Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
	)
@ -165,7 +167,7 @@ func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	rm := replicaset.NewReplicaSetController(
-		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Apps().V1().ReplicaSets(),
		informers.Core().V1().Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
		replicaset.BurstReplicas,
@ -256,7 +258,7 @@ func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
}

func (d *deploymentTester) deploymentComplete() (bool, error) {
-	latest, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+	latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
@ -285,6 +287,8 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsRe
	// Manually mark updated Deployment pods as ready in a separate goroutine
	wg.Add(1)
	go d.markUpdatedPodsReady(&wg)
+	// Wait for goroutine to finish, for all return paths.
+	defer wg.Wait()

	// Wait for the Deployment status to complete while Deployment pods are becoming ready
	err := d.waitForDeploymentCompleteAndCheckRolling()
@ -292,9 +296,6 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsRe
		return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
	}

-	// Wait for goroutine to finish
-	wg.Wait()
-
	return nil
}

@ -319,7 +320,7 @@ func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
	return nil
}

-func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
+func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*apps.Deployment, error) {
	return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}

@ -330,12 +331,12 @@ func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) er
	return nil
}

-func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
-	deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) {
+	deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
	}
-	rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.ExtensionsV1beta1())
+	rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.AppsV1())
	if err != nil {
		return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
	}
@ -353,7 +354,7 @@ func (d *deploymentTester) expectNoNewReplicaSet() error {
	return nil
}

-func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
+func (d *deploymentTester) expectNewReplicaSet() (*apps.ReplicaSet, error) {
	rs, err := d.getNewReplicaSet()
	if err != nil {
		return nil, err
@ -364,11 +365,11 @@ func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
	return rs, nil
}

-func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
+func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
	return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}

-func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
+func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
	return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
}

@ -386,7 +387,7 @@ func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplica
	return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
}

-func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
+func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType apps.DeploymentConditionType) error {
	return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout)
}

@ -417,13 +418,13 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
	return ownedPods, nil
}

-func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
+func (d *deploymentTester) waitRSStable(replicaset *apps.ReplicaSet) error {
	return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
}

func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
	var err error
-	d.deployment, err = d.updateDeployment(func(update *v1beta1.Deployment) {
+	d.deployment, err = d.updateDeployment(func(update *apps.Deployment) {
		update.Spec.Replicas = &newReplicas
	})
	if err != nil {
@ -447,7 +448,7 @@ func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
// waitForReadyReplicas waits for number of ready replicas to equal number of replicas.
func (d *deploymentTester) waitForReadyReplicas() error {
	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+		deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
		}
@ -485,7 +486,7 @@ func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
// Verify all replicas fields of DeploymentStatus have desired count.
// Immediately return an error when found a non-matching replicas field.
func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
-	deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+	deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
	}
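The updateDeployment and updateReplicaSet helpers above delegate to testutil.UpdateDeploymentWithRetries and its siblings, which retry the read-modify-write loop until the update stops conflicting. A rough equivalent built on client-go's retry package, a sketch only under the same client-go era; the function name here is hypothetical, not the testutil implementation:

package deploymentsketch

import (
	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateDeploymentWithRetries re-reads the Deployment, applies the mutation,
// and retries the write whenever the API server reports a conflict.
func updateDeploymentWithRetries(c clientset.Interface, ns, name string, applyUpdate func(*apps.Deployment)) (*apps.Deployment, error) {
	var result *apps.Deployment
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		d, err := c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		applyUpdate(d)
		result, err = c.AppsV1().Deployments(ns).Update(d)
		return err
	})
	return result, err
}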
5 vendor/k8s.io/kubernetes/test/integration/etcd/BUILD generated vendored
@ -21,7 +21,6 @@ go_test(
        "//cmd/kube-apiserver/app/options:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/core:go_default_library",
-       "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/master:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
@ -38,10 +37,10 @@ go_test(
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
+       "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
-       "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
-       "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+       "//vendor/k8s.io/client-go/restmapper:go_default_library",
        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
    ],
)
147 vendor/k8s.io/kubernetes/test/integration/etcd/etcd_storage_path_test.go generated vendored
@ -43,16 +43,14 @@ import (
	genericapiserver "k8s.io/apiserver/pkg/server"
	genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
	"k8s.io/apiserver/pkg/storage/storagebackend"
+	cacheddiscovery "k8s.io/client-go/discovery/cached"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/kubernetes/cmd/kube-apiserver/app"
	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/test/integration"
	"k8s.io/kubernetes/test/integration/framework"

@ -60,6 +58,7 @@ import (
	_ "k8s.io/kubernetes/pkg/master" // TODO what else is needed

	"github.com/coreos/etcd/clientv3"
+	"k8s.io/client-go/restmapper"
)

// Etcd data for all persisted objects.
@ -262,6 +261,7 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
	gvr("extensions", "v1beta1", "podsecuritypolicies"): {
		stub:             `{"metadata": {"name": "psp1"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
		expectedEtcdPath: "/registry/podsecuritypolicy/psp1",
+		expectedGVK:      gvkP("policy", "v1beta1", "PodSecurityPolicy"),
	},
	gvr("extensions", "v1beta1", "ingresses"): {
		stub: `{"metadata": {"name": "ingress1"}, "spec": {"backend": {"serviceName": "service", "servicePort": 5000}}}`,
@ -299,7 +299,6 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
	gvr("policy", "v1beta1", "podsecuritypolicies"): {
		stub:             `{"metadata": {"name": "psp2"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
		expectedEtcdPath: "/registry/podsecuritypolicy/psp2",
-		expectedGVK:      gvkP("extensions", "v1beta1", "PodSecurityPolicy"),
	},
	// --

@ -425,6 +424,14 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
	gvr("scheduling.k8s.io", "v1alpha1", "priorityclasses"): {
		stub:             `{"metadata":{"name":"pc1"},"Value":1000}`,
		expectedEtcdPath: "/registry/priorityclasses/pc1",
+		expectedGVK:      gvkP("scheduling.k8s.io", "v1beta1", "PriorityClass"),
	},
	// --

+	// k8s.io/kubernetes/pkg/apis/scheduling/v1beta1
+	gvr("scheduling.k8s.io", "v1beta1", "priorityclasses"): {
+		stub:             `{"metadata":{"name":"pc2"},"Value":1000}`,
+		expectedEtcdPath: "/registry/priorityclasses/pc2",
+	},
+	// --
}
@ -434,84 +441,83 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
var ephemeralWhiteList = createEphemeralWhiteList(

	// k8s.io/kubernetes/pkg/api/v1
-	gvr("", "v1", "bindings"),             // annotation on pod, not stored in etcd
-	gvr("", "v1", "rangeallocations"),     // stored in various places in etcd but cannot be directly created
-	gvr("", "v1", "componentstatuses"),    // status info not stored in etcd
-	gvr("", "v1", "serializedreferences"), // used for serialization, not stored in etcd
-	gvr("", "v1", "nodeconfigsources"),    // subfield of node.spec, but shouldn't be directly created
-	gvr("", "v1", "podstatusresults"),     // wrapper object not stored in etcd
+	gvk("", "v1", "Binding"),             // annotation on pod, not stored in etcd
+	gvk("", "v1", "RangeAllocation"),     // stored in various places in etcd but cannot be directly created
+	gvk("", "v1", "ComponentStatus"),     // status info not stored in etcd
+	gvk("", "v1", "SerializedReference"), // used for serialization, not stored in etcd
+	gvk("", "v1", "PodStatusResult"),     // wrapper object not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/authentication/v1beta1
-	gvr("authentication.k8s.io", "v1beta1", "tokenreviews"), // not stored in etcd
+	gvk("authentication.k8s.io", "v1beta1", "TokenReview"), // not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/authentication/v1
-	gvr("authentication.k8s.io", "v1", "tokenreviews"),  // not stored in etcd
-	gvr("authentication.k8s.io", "v1", "tokenrequests"), // not stored in etcd
+	gvk("authentication.k8s.io", "v1", "TokenReview"),  // not stored in etcd
+	gvk("authentication.k8s.io", "v1", "TokenRequest"), // not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/authorization/v1beta1

	// SRR objects that are not stored in etcd
-	gvr("authorization.k8s.io", "v1beta1", "selfsubjectrulesreviews"),
+	gvk("authorization.k8s.io", "v1beta1", "SelfSubjectRulesReview"),
	// SAR objects that are not stored in etcd
-	gvr("authorization.k8s.io", "v1beta1", "selfsubjectaccessreviews"),
-	gvr("authorization.k8s.io", "v1beta1", "localsubjectaccessreviews"),
-	gvr("authorization.k8s.io", "v1beta1", "subjectaccessreviews"),
+	gvk("authorization.k8s.io", "v1beta1", "SelfSubjectAccessReview"),
+	gvk("authorization.k8s.io", "v1beta1", "LocalSubjectAccessReview"),
+	gvk("authorization.k8s.io", "v1beta1", "SubjectAccessReview"),
	// --

	// k8s.io/kubernetes/pkg/apis/authorization/v1

	// SRR objects that are not stored in etcd
-	gvr("authorization.k8s.io", "v1", "selfsubjectrulesreviews"),
+	gvk("authorization.k8s.io", "v1", "SelfSubjectRulesReview"),
	// SAR objects that are not stored in etcd
-	gvr("authorization.k8s.io", "v1", "selfsubjectaccessreviews"),
-	gvr("authorization.k8s.io", "v1", "localsubjectaccessreviews"),
-	gvr("authorization.k8s.io", "v1", "subjectaccessreviews"),
+	gvk("authorization.k8s.io", "v1", "SelfSubjectAccessReview"),
+	gvk("authorization.k8s.io", "v1", "LocalSubjectAccessReview"),
+	gvk("authorization.k8s.io", "v1", "SubjectAccessReview"),
	// --

	// k8s.io/kubernetes/pkg/apis/autoscaling/v1
-	gvr("autoscaling", "v1", "scales"), // not stored in etcd, part of kapiv1.ReplicationController
+	gvk("autoscaling", "v1", "Scale"), // not stored in etcd, part of kapiv1.ReplicationController
	// --

	// k8s.io/kubernetes/pkg/apis/apps/v1beta1
-	gvr("apps", "v1beta1", "scales"),              // not stored in etcd, part of kapiv1.ReplicationController
-	gvr("apps", "v1beta1", "deploymentrollbacks"), // used to rollback deployment, not stored in etcd
+	gvk("apps", "v1beta1", "Scale"),              // not stored in etcd, part of kapiv1.ReplicationController
+	gvk("apps", "v1beta1", "DeploymentRollback"), // used to rollback deployment, not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/apps/v1beta2
-	gvr("apps", "v1beta2", "scales"), // not stored in etcd, part of kapiv1.ReplicationController
+	gvk("apps", "v1beta2", "Scale"), // not stored in etcd, part of kapiv1.ReplicationController
	// --

	// k8s.io/kubernetes/pkg/apis/batch/v1beta1
-	gvr("batch", "v1beta1", "jobtemplates"), // not stored in etcd
+	gvk("batch", "v1beta1", "JobTemplate"), // not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/batch/v2alpha1
-	gvr("batch", "v2alpha1", "jobtemplates"), // not stored in etcd
+	gvk("batch", "v2alpha1", "JobTemplate"), // not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1
-	gvr("componentconfig", "v1alpha1", "kubeschedulerconfigurations"), // not stored in etcd
+	gvk("componentconfig", "v1alpha1", "KubeSchedulerConfiguration"), // not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/extensions/v1beta1
-	gvr("extensions", "v1beta1", "deploymentrollbacks"),          // used to rollback deployment, not stored in etcd
-	gvr("extensions", "v1beta1", "replicationcontrollerdummies"), // not stored in etcd
-	gvr("extensions", "v1beta1", "scales"),                       // not stored in etcd, part of kapiv1.ReplicationController
+	gvk("extensions", "v1beta1", "DeploymentRollback"),         // used to rollback deployment, not stored in etcd
+	gvk("extensions", "v1beta1", "ReplicationControllerDummy"), // not stored in etcd
+	gvk("extensions", "v1beta1", "Scale"),                      // not stored in etcd, part of kapiv1.ReplicationController
	// --

	// k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1
-	gvr("imagepolicy.k8s.io", "v1alpha1", "imagereviews"), // not stored in etcd
+	gvk("imagepolicy.k8s.io", "v1alpha1", "ImageReview"), // not stored in etcd
	// --

	// k8s.io/kubernetes/pkg/apis/policy/v1beta1
-	gvr("policy", "v1beta1", "evictions"), // not stored in etcd, deals with evicting kapiv1.Pod
+	gvk("policy", "v1beta1", "Eviction"), // not stored in etcd, deals with evicting kapiv1.Pod
	// --

	// k8s.io/kubernetes/pkg/apis/admission/v1beta1
-	gvr("admission.k8s.io", "v1beta1", "admissionreviews"), // not stored in etcd, call out to webhooks.
+	gvk("admission.k8s.io", "v1beta1", "AdmissionReview"), // not stored in etcd, call out to webhooks.
	// --
)

@ -537,7 +543,7 @@ var kindWhiteList = sets.NewString(
	"WatchEvent",
	// --

-	// k8s.io/kubernetes/pkg/api/unversioned
+	// k8s.io/apimachinery/pkg/apis/meta/v1
	"Status",
	// --
)
@ -561,7 +567,7 @@ func TestEtcdStoragePath(t *testing.T) {
	kindSeen := sets.NewString()
	pathSeen := map[string][]schema.GroupVersionResource{}
	etcdSeen := map[schema.GroupVersionResource]empty{}
-	ephemeralSeen := map[schema.GroupVersionResource]empty{}
+	ephemeralSeen := map[schema.GroupVersionKind]empty{}
	cohabitatingResources := map[string]map[schema.GroupVersionKind]empty{}

	for gvk, apiType := range legacyscheme.Scheme.AllKnownTypes() {
@ -577,6 +583,11 @@ func TestEtcdStoragePath(t *testing.T) {
			kindSeen.Insert(kind)
			continue
		}
+		_, isEphemeral := ephemeralWhiteList[gvk]
+		if isEphemeral {
+			ephemeralSeen[gvk] = empty{}
+			continue
+		}

		mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
		if err != nil {
@ -584,29 +595,15 @@ func TestEtcdStoragePath(t *testing.T) {
			continue
		}

-		gvResource := gvk.GroupVersion().WithResource(mapping.Resource)
-		etcdSeen[gvResource] = empty{}
+		etcdSeen[mapping.Resource] = empty{}

-		testData, hasTest := etcdStorageData[gvResource]
-		_, isEphemeral := ephemeralWhiteList[gvResource]
+		testData, hasTest := etcdStorageData[mapping.Resource]

-		if !hasTest && !isEphemeral {
+		if !hasTest {
			t.Errorf("no test data for %s from %s. Please add a test for your new type to etcdStorageData.", kind, pkgPath)
			continue
		}

-		if hasTest && isEphemeral {
-			t.Errorf("duplicate test data for %s from %s. Object has both test data and is ephemeral.", kind, pkgPath)
-			continue
-		}
-
-		if isEphemeral { // TODO it would be nice if we could remove this and infer if an object is not stored in etcd
-			// t.Logf("Skipping test for %s from %s", kind, pkgPath)
-			ephemeralSeen[gvResource] = empty{}
-			delete(etcdSeen, gvResource)
-			continue
-		}
-
		if len(testData.expectedEtcdPath) == 0 {
			t.Errorf("empty test data for %s from %s", kind, pkgPath)
			continue
@ -668,7 +665,7 @@ func TestEtcdStoragePath(t *testing.T) {
			}

			addGVKToEtcdBucket(cohabitatingResources, actualGVK, getEtcdBucket(testData.expectedEtcdPath))
-			pathSeen[testData.expectedEtcdPath] = append(pathSeen[testData.expectedEtcdPath], gvResource)
+			pathSeen[testData.expectedEtcdPath] = append(pathSeen[testData.expectedEtcdPath], mapping.Resource)
		}()
	}

@ -733,21 +730,25 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
	kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
	kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf?
	kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
-	kubeAPIServerOptions.Authorization.Mode = "RBAC"
+	kubeAPIServerOptions.Authorization.Modes = []string{"RBAC"}
	kubeAPIServerOptions.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}

-	tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
+	completedOptions, err := app.Complete(kubeAPIServerOptions)
	if err != nil {
		t.Fatal(err)
	}
-	kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
+
+	tunneler, proxyTransport, err := app.CreateNodeDialer(completedOptions)
+	if err != nil {
+		t.Fatal(err)
+	}
+	kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
	if err != nil {
		t.Fatal(err)
	}

	kubeAPIServerConfig.ExtraConfig.APIResourceConfigSource = &allResourceSource{} // force enable all resources

-	kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
+	kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
	if err != nil {
		t.Fatal(err)
	}
@ -807,9 +808,11 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
		t.Fatal(err)
	}

-	mapper, _ := util.NewFactory(clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{})).Object()
+	discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery())
+	restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
+	restMapper.Reset()

-	return client, kvClient, mapper
+	return client, kvClient, restMapper
}

func dumpEtcdKVOnFailure(t *testing.T, kvClient clientv3.KV) {
@ -884,17 +887,21 @@ func gvr(g, v, r string) schema.GroupVersionResource {
	return schema.GroupVersionResource{Group: g, Version: v, Resource: r}
}

+func gvk(g, v, k string) schema.GroupVersionKind {
+	return schema.GroupVersionKind{Group: g, Version: v, Kind: k}
+}
+
func gvkP(g, v, k string) *schema.GroupVersionKind {
	return &schema.GroupVersionKind{Group: g, Version: v, Kind: k}
}

-func createEphemeralWhiteList(gvrs ...schema.GroupVersionResource) map[schema.GroupVersionResource]empty {
-	ephemeral := map[schema.GroupVersionResource]empty{}
-	for _, gvResource := range gvrs {
-		if _, ok := ephemeral[gvResource]; ok {
+func createEphemeralWhiteList(gvks ...schema.GroupVersionKind) map[schema.GroupVersionKind]empty {
+	ephemeral := map[schema.GroupVersionKind]empty{}
+	for _, gvKind := range gvks {
+		if _, ok := ephemeral[gvKind]; ok {
			panic("invalid ephemeral whitelist contains duplicate keys")
		}
-		ephemeral[gvResource] = empty{}
+		ephemeral[gvKind] = empty{}
	}
	return ephemeral
}
@ -952,7 +959,7 @@ func (c *allClient) create(stub, ns string, mapping *meta.RESTMapping, all *[]cl
		return err
	}
	namespaced := mapping.Scope.Name() == meta.RESTScopeNameNamespace
-	output, err := req.NamespaceIfScoped(ns, namespaced).Resource(mapping.Resource).Body(strings.NewReader(stub)).Do().Get()
+	output, err := req.NamespaceIfScoped(ns, namespaced).Resource(mapping.Resource.Resource).Body(strings.NewReader(stub)).Do().Get()
	if err != nil {
		return err
	}
@ -966,15 +973,15 @@ func (c *allClient) destroy(obj runtime.Object, mapping *meta.RESTMapping) error
		return err
	}
	namespaced := mapping.Scope.Name() == meta.RESTScopeNameNamespace
-	name, err := mapping.MetadataAccessor.Name(obj)
+	name, err := meta.NewAccessor().Name(obj)
	if err != nil {
		return err
	}
-	ns, err := mapping.MetadataAccessor.Namespace(obj)
+	ns, err := meta.NewAccessor().Namespace(obj)
	if err != nil {
		return err
	}
-	return req.NamespaceIfScoped(ns, namespaced).Resource(mapping.Resource).Name(name).Do().Error()
+	return req.NamespaceIfScoped(ns, namespaced).Resource(mapping.Resource.Resource).Name(name).Do().Error()
}

func (c *allClient) cleanup(all *[]cleanupData) error {
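Two API changes drive most of this file's rewrite: the kubectl factory mapper is replaced by a discovery-backed RESTMapper, and RESTMapping.Resource is now a full schema.GroupVersionResource rather than a plain string, which is why the REST calls switch to mapping.Resource.Resource. A compressed sketch of the new lookup path, assuming a ready kubernetes.Interface; the helper name is illustrative:

package mappersketch

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	cacheddiscovery "k8s.io/client-go/discovery/cached"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/restmapper"
)

// resourceFor resolves a kind to its REST resource via live discovery, the
// same way the test now builds its mapper.
func resourceFor(kubeClient clientset.Interface, gvk schema.GroupVersionKind) (schema.GroupVersionResource, error) {
	discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery())
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
	mapper.Reset() // prime the cached discovery data
	mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	// mapping.Resource is a full GroupVersionResource now; REST requests that
	// want only the resource name use mapping.Resource.Resource.
	return mapping.Resource, nil
}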
10 vendor/k8s.io/kubernetes/test/integration/examples/BUILD generated vendored
@ -11,15 +11,24 @@ go_test(
    srcs = [
        "apiserver_test.go",
        "main_test.go",
+       "setup_test.go",
+       "webhook_test.go",
    ],
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app:go_default_library",
+       "//cmd/kube-apiserver/app/options:go_default_library",
+       "//pkg/master:go_default_library",
+       "//pkg/master/reconcilers:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
+       "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
+       "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+       "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library",
+       "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
@ -31,6 +40,7 @@ go_test(
        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
+       "//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library",
    ],
)
55 vendor/k8s.io/kubernetes/test/integration/examples/apiserver_test.go generated vendored
@ -46,17 +46,11 @@ import (
	"k8s.io/kubernetes/cmd/kube-apiserver/app"
	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
	"k8s.io/kubernetes/test/integration/framework"
-	"k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
+	wardlev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
+	wardlev1beta1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1"
	sampleserver "k8s.io/sample-apiserver/pkg/cmd/server"
)

-var groupVersion = v1alpha1.SchemeGroupVersion
-
-var groupVersionForDiscovery = metav1.GroupVersionForDiscovery{
-	GroupVersion: groupVersion.String(),
-	Version:      groupVersion.Version,
-}
-
func TestAggregatedAPIServer(t *testing.T) {
	stopCh := make(chan struct{})
	defer close(stopCh)
@ -109,13 +103,17 @@ func TestAggregatedAPIServer(t *testing.T) {
	kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"}
	kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name()
	kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name()
-	kubeAPIServerOptions.Authorization.Mode = "RBAC"
-
-	tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
+	kubeAPIServerOptions.Authorization.Modes = []string{"RBAC"}
+	completedOptions, err := app.Complete(kubeAPIServerOptions)
	if err != nil {
		t.Fatal(err)
	}
-	kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
+
+	tunneler, proxyTransport, err := app.CreateNodeDialer(completedOptions)
+	if err != nil {
+		t.Fatal(err)
+	}
+	kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
	if err != nil {
		t.Fatal(err)
	}
@ -126,7 +124,7 @@ func TestAggregatedAPIServer(t *testing.T) {
	kubeAPIServerClientConfig.ServerName = ""
	kubeClientConfigValue.Store(kubeAPIServerClientConfig)

-	kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
+	kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
	if err != nil {
		t.Fatal(err)
	}
@ -137,7 +135,7 @@ func TestAggregatedAPIServer(t *testing.T) {
	}()

	// just use json because everyone speaks it
-	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
+	err = wait.PollImmediate(time.Second, time.Minute, func() (done bool, err error) {
		obj := kubeClientConfigValue.Load()
		if obj == nil {
			return false, nil
@ -419,10 +417,21 @@ func testAPIGroupList(t *testing.T, client rest.Interface) {
		t.Fatalf("Error in unmarshalling response from server %s: %v", "/apis", err)
	}
	assert.Equal(t, 1, len(apiGroupList.Groups))
-	assert.Equal(t, groupVersion.Group, apiGroupList.Groups[0].Name)
-	assert.Equal(t, 1, len(apiGroupList.Groups[0].Versions))
-	assert.Equal(t, groupVersionForDiscovery, apiGroupList.Groups[0].Versions[0])
-	assert.Equal(t, groupVersionForDiscovery, apiGroupList.Groups[0].PreferredVersion)
+	assert.Equal(t, wardlev1alpha1.GroupName, apiGroupList.Groups[0].Name)
+	assert.Equal(t, 2, len(apiGroupList.Groups[0].Versions))
+
+	v1alpha1 := metav1.GroupVersionForDiscovery{
+		GroupVersion: wardlev1alpha1.SchemeGroupVersion.String(),
+		Version:      wardlev1alpha1.SchemeGroupVersion.Version,
+	}
+	v1beta1 := metav1.GroupVersionForDiscovery{
+		GroupVersion: wardlev1beta1.SchemeGroupVersion.String(),
+		Version:      wardlev1beta1.SchemeGroupVersion.Version,
+	}
+
+	assert.Equal(t, v1beta1, apiGroupList.Groups[0].Versions[0])
+	assert.Equal(t, v1alpha1, apiGroupList.Groups[0].Versions[1])
+	assert.Equal(t, v1beta1, apiGroupList.Groups[0].PreferredVersion)
}

func testAPIGroup(t *testing.T, client rest.Interface) {
@ -436,10 +445,10 @@ func testAPIGroup(t *testing.T, client rest.Interface) {
	if err != nil {
		t.Fatalf("Error in unmarshalling response from server %s: %v", "/apis/wardle.k8s.io", err)
	}
-	assert.Equal(t, groupVersion.Group, apiGroup.Name)
-	assert.Equal(t, 1, len(apiGroup.Versions))
-	assert.Equal(t, groupVersion.String(), apiGroup.Versions[0].GroupVersion)
-	assert.Equal(t, groupVersion.Version, apiGroup.Versions[0].Version)
+	assert.Equal(t, wardlev1alpha1.SchemeGroupVersion.Group, apiGroup.Name)
+	assert.Equal(t, 2, len(apiGroup.Versions))
+	assert.Equal(t, wardlev1alpha1.SchemeGroupVersion.String(), apiGroup.Versions[1].GroupVersion)
+	assert.Equal(t, wardlev1alpha1.SchemeGroupVersion.Version, apiGroup.Versions[1].Version)
	assert.Equal(t, apiGroup.PreferredVersion, apiGroup.Versions[0])
}

@ -454,7 +463,7 @@ func testAPIResourceList(t *testing.T, client rest.Interface) {
	if err != nil {
		t.Fatalf("Error in unmarshalling response from server %s: %v", "/apis/wardle.k8s.io/v1alpha1", err)
	}
-	assert.Equal(t, groupVersion.String(), apiResourceList.GroupVersion)
+	assert.Equal(t, wardlev1alpha1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
	assert.Equal(t, 2, len(apiResourceList.APIResources))
	assert.Equal(t, "fischers", apiResourceList.APIResources[0].Name)
	assert.False(t, apiResourceList.APIResources[0].Namespaced)
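The reworked assertions encode the discovery contract of the aggregated wardle group after v1beta1 is added: two served versions, sorted v1beta1 first, with v1beta1 also reported as the preferred version. A small hedged sketch of checking that contract through a discovery client; the helper name is illustrative, and any client pointed at the aggregated server is assumed:

package discoverysketch

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
)

// printWardleVersions lists the served versions of the wardle group in the
// server's priority order; after this change v1beta1 precedes v1alpha1 and is
// the preferred version.
func printWardleVersions(client clientset.Interface) error {
	groups, err := client.Discovery().ServerGroups()
	if err != nil {
		return err
	}
	for _, g := range groups.Groups {
		if g.Name != "wardle.k8s.io" {
			continue
		}
		fmt.Println("preferred:", g.PreferredVersion.GroupVersion)
		for _, v := range g.Versions {
			fmt.Println("served:", v.GroupVersion)
		}
	}
	return nil
}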
168 vendor/k8s.io/kubernetes/test/integration/examples/setup_test.go generated vendored Normal file
@ -0,0 +1,168 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apiserver
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pborman/uuid"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
|
||||
client "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/cert"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
|
||||
"k8s.io/kubernetes/pkg/master"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
type TestServerSetup struct {
|
||||
ModifyServerRunOptions func(*options.ServerRunOptions)
|
||||
ModifyServerConfig func(*master.Config)
|
||||
}
|
||||
|
||||
// startTestServer runs a kube-apiserver, optionally calling out to the setup.ModifyServerRunOptions and setup.ModifyServerConfig functions
|
||||
func startTestServer(t *testing.T, stopCh <-chan struct{}, setup TestServerSetup) (client.Interface, *rest.Config) {
    certDir, _ := ioutil.TempDir("", "test-integration-"+t.Name())
    go func() {
        <-stopCh
        os.RemoveAll(certDir)
    }()

    _, defaultServiceClusterIPRange, _ := net.ParseCIDR("10.0.0.0/24")
    proxySigningKey, err := cert.NewPrivateKey()
    if err != nil {
        t.Fatal(err)
    }
    proxySigningCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "front-proxy-ca"}, proxySigningKey)
    if err != nil {
        t.Fatal(err)
    }
    proxyCACertFile, _ := ioutil.TempFile(certDir, "proxy-ca.crt")
    if err := ioutil.WriteFile(proxyCACertFile.Name(), cert.EncodeCertPEM(proxySigningCert), 0644); err != nil {
        t.Fatal(err)
    }
    clientSigningKey, err := cert.NewPrivateKey()
    if err != nil {
        t.Fatal(err)
    }
    clientSigningCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "client-ca"}, clientSigningKey)
    if err != nil {
        t.Fatal(err)
    }
    clientCACertFile, _ := ioutil.TempFile(certDir, "client-ca.crt")
    if err := ioutil.WriteFile(clientCACertFile.Name(), cert.EncodeCertPEM(clientSigningCert), 0644); err != nil {
        t.Fatal(err)
    }

    listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatal(err)
    }

    kubeAPIServerOptions := options.NewServerRunOptions()
    kubeAPIServerOptions.SecureServing.Listener = listener
    kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
    kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
    kubeAPIServerOptions.InsecureServing.BindPort = 0
    kubeAPIServerOptions.Etcd.StorageConfig.Prefix = path.Join("/", uuid.New(), "registry")
    kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
    kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
    kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"}
    kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"}
    kubeAPIServerOptions.Authentication.RequestHeader.ExtraHeaderPrefixes = []string{"X-Remote-Extra-"}
    kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"}
    kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name()
    kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name()
    kubeAPIServerOptions.Authorization.Modes = []string{"Node", "RBAC"}

    if setup.ModifyServerRunOptions != nil {
        setup.ModifyServerRunOptions(kubeAPIServerOptions)
    }

    completedOptions, err := app.Complete(kubeAPIServerOptions)
    if err != nil {
        t.Fatal(err)
    }
    tunneler, proxyTransport, err := app.CreateNodeDialer(completedOptions)
    if err != nil {
        t.Fatal(err)
    }
    kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
    if err != nil {
        t.Fatal(err)
    }

    if setup.ModifyServerConfig != nil {
        setup.ModifyServerConfig(kubeAPIServerConfig)
    }
    kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
    if err != nil {
        t.Fatal(err)
    }
    go func() {
        if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(stopCh); err != nil {
            t.Fatal(err)
        }
    }()

    // Adjust the loopback config for external use (external server name and CA)
    kubeAPIServerClientConfig := rest.CopyConfig(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
    kubeAPIServerClientConfig.CAFile = path.Join(certDir, "apiserver.crt")
    kubeAPIServerClientConfig.CAData = nil
    kubeAPIServerClientConfig.ServerName = ""

    // wait for health
    err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
        healthzConfig := rest.CopyConfig(kubeAPIServerClientConfig)
        healthzConfig.ContentType = ""
        healthzConfig.AcceptContentTypes = ""
        kubeClient, err := client.NewForConfig(healthzConfig)
        if err != nil {
            // this happens because we race the API server start
            t.Log(err)
            return false, nil
        }

        healthStatus := 0
        kubeClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
        if healthStatus != http.StatusOK {
            return false, nil
        }

        return true, nil
    })
    if err != nil {
        t.Fatal(err)
    }

    kubeAPIServerClient, err := client.NewForConfig(kubeAPIServerClientConfig)
    if err != nil {
        t.Fatal(err)
    }

    return kubeAPIServerClient, kubeAPIServerClientConfig
}
118
vendor/k8s.io/kubernetes/test/integration/examples/webhook_test.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
    "sync/atomic"
    "testing"
    "time"

    admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    auditinternal "k8s.io/apiserver/pkg/apis/audit"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
    "k8s.io/kubernetes/pkg/master"
    "k8s.io/kubernetes/pkg/master/reconcilers"
)

func TestWebhookLoopback(t *testing.T) {
    stopCh := make(chan struct{})
    defer close(stopCh)

    webhookPath := "/webhook-test"

    called := int32(0)

    client, _ := startTestServer(t, stopCh, TestServerSetup{
        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
        },
        ModifyServerConfig: func(config *master.Config) {
            // Avoid a resolvable kubernetes service
            config.ExtraConfig.EndpointReconcilerType = reconcilers.NoneEndpointReconcilerType

            // Hook into audit to watch requests
            config.GenericConfig.AuditBackend = auditSinkFunc(func(events ...*auditinternal.Event) {})
            config.GenericConfig.AuditPolicyChecker = auditChecker(func(attrs authorizer.Attributes) (auditinternal.Level, []auditinternal.Stage) {
                if attrs.GetPath() == webhookPath {
                    if attrs.GetUser().GetName() != "system:apiserver" {
                        t.Errorf("expected user %q, got %q", "system:apiserver", attrs.GetUser().GetName())
                    }
                    atomic.AddInt32(&called, 1)
                }
                return auditinternal.LevelNone, nil
            })
        },
    })

    fail := admissionv1beta1.Fail
    _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{
        ObjectMeta: metav1.ObjectMeta{Name: "webhooktest.example.com"},
        Webhooks: []admissionv1beta1.Webhook{{
            Name: "webhooktest.example.com",
            ClientConfig: admissionv1beta1.WebhookClientConfig{
                Service: &admissionv1beta1.ServiceReference{Namespace: "default", Name: "kubernetes", Path: &webhookPath},
            },
            Rules: []admissionv1beta1.RuleWithOperations{{
                Operations: []admissionv1beta1.OperationType{admissionv1beta1.OperationAll},
                Rule:       admissionv1beta1.Rule{APIGroups: []string{""}, APIVersions: []string{"v1"}, Resources: []string{"configmaps"}},
            }},
            FailurePolicy: &fail,
        }},
    })
    if err != nil {
        t.Fatal(err)
    }

    err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
        _, err = client.CoreV1().ConfigMaps("default").Create(&v1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{Name: "webhook-test"},
            Data:       map[string]string{"invalid key": "value"},
        })
        if err == nil {
            t.Fatal("Unexpected success")
        }
        // read with atomic to match the atomic writer in the audit checker above
        if atomic.LoadInt32(&called) > 0 {
            return true, nil
        }
        t.Logf("%v", err)
        t.Logf("webhook not called yet, continuing...")
        return false, nil
    })
    if err != nil {
        t.Fatal(err)
    }
}

type auditChecker func(authorizer.Attributes) (auditinternal.Level, []auditinternal.Stage)

func (f auditChecker) LevelAndStages(attrs authorizer.Attributes) (auditinternal.Level, []auditinternal.Stage) {
    return f(attrs)
}

type auditSinkFunc func(events ...*auditinternal.Event)

func (f auditSinkFunc) ProcessEvents(events ...*auditinternal.Event) {
    f(events...)
}

func (auditSinkFunc) Run(stopCh <-chan struct{}) error {
    return nil
}

func (auditSinkFunc) Shutdown() {
}
2
vendor/k8s.io/kubernetes/test/integration/framework/BUILD
generated
vendored
@ -28,7 +28,6 @@ go_library(
        "//pkg/master:go_default_library",
        "//pkg/util/env:go_default_library",
        "//pkg/version:go_default_library",
        "//plugin/pkg/admission/admit:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/go-openapi/spec:go_default_library",
@ -54,6 +53,7 @@ go_library(
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library",
34
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go
generated
vendored
@ -44,6 +44,7 @@ import (
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/apiserver/pkg/authorization/authorizerfactory"
    authorizerunion "k8s.io/apiserver/pkg/authorization/union"
    openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
    genericapiserver "k8s.io/apiserver/pkg/server"
    "k8s.io/apiserver/pkg/server/options"
    serverstorage "k8s.io/apiserver/pkg/server/storage"
@ -59,7 +60,6 @@ import (
    kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
    "k8s.io/kubernetes/pkg/master"
    "k8s.io/kubernetes/pkg/version"
    "k8s.io/kubernetes/plugin/pkg/admission/admit"
)

// Config is a struct of configuration directives for NewMasterComponents.
@ -125,9 +125,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv

    if masterConfig == nil {
        masterConfig = NewMasterConfig()
        masterConfig.GenericConfig.EnableProfiling = true
        masterConfig.GenericConfig.EnableMetrics = true
        masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme)
        masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme))
        masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{
            InfoProps: spec.InfoProps{
                Title: "Kubernetes",
@ -180,7 +178,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
    }

    sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
    m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
    m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.NewEmptyDelegate())
    if err != nil {
        closeFn()
        glog.Fatalf("error in bringing up the master: %v", err)
@ -202,6 +200,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
        closeFn()
        glog.Fatal(err)
    }
    var lastHealthContent []byte
    err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
        result := privilegedClient.Get().AbsPath("/healthz").Do()
        status := 0
@ -209,16 +208,26 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
        if status == 200 {
            return true, nil
        }
        lastHealthContent, _ = result.Raw()
        return false, nil
    })
    if err != nil {
        closeFn()
        glog.Errorf("last health content: %q", string(lastHealthContent))
        glog.Fatal(err)
    }

    return m, s, closeFn
}

// Returns the master config appropriate for most integration tests.
func NewIntegrationTestMasterConfig() *master.Config {
    masterConfig := NewMasterConfig()
    masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
    masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
    return masterConfig
}

// Returns a basic master config.
func NewMasterConfig() *master.Config {
    // This causes the integration tests to exercise the etcd
@ -230,7 +239,7 @@ func NewMasterConfig() *master.Config {
    info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
    ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)

    resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry)
    resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Scheme)
    // FIXME (soltysh): this GroupVersionResource override should be configurable
    // we need to set both for the whole group and for cronjobs, separately
    resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
@ -281,8 +290,6 @@ func NewMasterConfig() *master.Config {
    kubeVersion := version.Get()
    genericConfig.Version = &kubeVersion
    genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
    genericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    genericConfig.EnableMetrics = true

    err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)
    if err != nil {
@ -294,7 +301,6 @@ func NewMasterConfig() *master.Config {
        ExtraConfig: master.ExtraConfig{
            APIResourceConfigSource: master.DefaultAPIResourceConfigSource(),
            StorageFactory:          storageFactory,
            EnableCoreControllers:   true,
            KubeletClientConfig:     kubeletclient.KubeletClientConfig{Port: 10250},
            APIServerServicePort:    443,
            MasterCount:             1,
@ -302,15 +308,6 @@ func NewMasterConfig() *master.Config {
    }
}

// Returns the master config appropriate for most integration tests.
func NewIntegrationTestMasterConfig() *master.Config {
    masterConfig := NewMasterConfig()
    masterConfig.ExtraConfig.EnableCoreControllers = true
    masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
    masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
    return masterConfig
}

// CloseFunc can be called to clean up the master
type CloseFunc func()

@ -318,7 +315,6 @@ func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server,
    if masterConfig == nil {
        masterConfig = NewMasterConfig()
        masterConfig.GenericConfig.EnableProfiling = true
        masterConfig.GenericConfig.EnableMetrics = true
    }
    return startMasterOrDie(masterConfig, nil, nil)
}
6
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go
generated
vendored
@ -56,10 +56,6 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: p.nodeNamePrefix,
        },
        Spec: v1.NodeSpec{
            // TODO: investigate why this is needed.
            ExternalID: "foo",
        },
        Status: v1.NodeStatus{
            Capacity: v1.ResourceList{
                v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
@ -76,7 +72,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
    var err error
    for retry := 0; retry < retries; retry++ {
        _, err = p.client.CoreV1().Nodes().Create(baseNode)
        if err == nil || !e2eframework.IsRetryableAPIError(err) {
        if err == nil || !testutils.IsRetryableAPIError(err) {
            break
        }
    }
23
vendor/k8s.io/kubernetes/test/integration/framework/util.go
generated
vendored
@ -20,12 +20,10 @@ package framework

import (
    "net/http/httptest"
    "strings"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
)

const (
@ -35,27 +33,6 @@ const (
    currentPodInfraContainerImageVersion = "3.1"
)

// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
    arch := ""
    sVer, err := c.Discovery().ServerVersion()
    if err != nil || sVer.Platform == "" {
        // If we failed to get the server version for some reason, default to amd64.
        arch = "amd64"
    } else {
        // Split the platform string into OS and Arch separately.
        // The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
        osArchArray := strings.Split(sVer.Platform, "/")
        arch = osArchArray[1]
    }
    return arch
}

// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
    return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}

func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
    // TODO: Create a namespace with a given basename.
    // Currently we neither create the namespace nor delete all of its contents at the end.
3
vendor/k8s.io/kubernetes/test/integration/garbagecollector/BUILD
generated
vendored
@ -23,14 +23,15 @@ go_test(
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/restmapper:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)
@ -51,7 +51,7 @@ func (b *readDelayer) Read(p []byte) (n int, err error) {

func TestClusterScopedOwners(t *testing.T) {
    // Start the test server and wrap the client to delay PV watch responses
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
    server.ClientConfig.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
        return roundTripFunc(func(req *http.Request) (*http.Response, error) {
            if req.URL.Query().Get("watch") != "true" || !strings.Contains(req.URL.String(), "persistentvolumes") {
79
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go
generated
vendored
@ -32,14 +32,15 @@ import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apiserver/pkg/storage/names"
    "k8s.io/client-go/discovery"
    cacheddiscovery "k8s.io/client-go/discovery/cached"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/restmapper"
    "k8s.io/client-go/tools/cache"
    kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
    "k8s.io/kubernetes/pkg/controller/garbagecollector"
@ -167,23 +168,23 @@ func link(t *testing.T, owner, dependent metav1.Object) {

func createRandomCustomResourceDefinition(
    t *testing.T, apiExtensionClient apiextensionsclientset.Interface,
    clientPool dynamic.ClientPool,
    dynamicClient dynamic.Interface,
    namespace string,
) (*apiextensionsv1beta1.CustomResourceDefinition, dynamic.ResourceInterface) {
    // Create a random custom resource definition and ensure it's available for
    // use.
    definition := apiextensionstestserver.NewRandomNameCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped)

    client, err := apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, clientPool)
    definition, err := apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, dynamicClient)
    if err != nil {
        t.Fatalf("failed to create CustomResourceDefinition: %v", err)
    }

    // Get a client for the custom resource.
    resourceClient := client.Resource(&metav1.APIResource{
        Name:       definition.Spec.Names.Plural,
        Namespaced: true,
    }, namespace)
    gvr := schema.GroupVersionResource{Group: definition.Spec.Group, Version: definition.Spec.Version, Resource: definition.Spec.Names.Plural}

    resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)

    return definition, resourceClient
}

@ -192,7 +193,7 @@ type testContext struct {
    gc                 *garbagecollector.GarbageCollector
    clientSet          clientset.Interface
    apiExtensionClient apiextensionsclientset.Interface
    clientPool         dynamic.ClientPool
    dynamicClient      dynamic.Interface
    startGC            func(workers int)
    // syncPeriod is how often the GC started with startGC will be resynced.
    syncPeriod time.Duration
@ -200,7 +201,7 @@ type testContext struct {

// if workerCount > 0, will start the GC, otherwise it's up to the caller to Run() the GC.
func setup(t *testing.T, workerCount int) *testContext {
    return setupWithServer(t, kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()), workerCount)
    return setupWithServer(t, kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd()), workerCount)
}

func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, workerCount int) *testContext {
@ -219,19 +220,19 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
    createNamespaceOrDie("aval", clientSet, t)

    discoveryClient := cacheddiscovery.NewMemCacheClient(clientSet.Discovery())
    restMapper := discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, meta.InterfacesForUnstructured)
    restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
    restMapper.Reset()
    deletableResources := garbagecollector.GetDeletableResources(discoveryClient)
    config := *result.ClientConfig
    config.ContentConfig = dynamic.ContentConfig()
    metaOnlyClientPool := dynamic.NewClientPool(&config, restMapper, dynamic.LegacyAPIPathResolverFunc)
    clientPool := dynamic.NewClientPool(&config, restMapper, dynamic.LegacyAPIPathResolverFunc)
    dynamicClient, err := dynamic.NewForConfig(&config)
    if err != nil {
        t.Fatalf("failed to create dynamicClient: %v", err)
    }
    sharedInformers := informers.NewSharedInformerFactory(clientSet, 0)
    alwaysStarted := make(chan struct{})
    close(alwaysStarted)
    gc, err := garbagecollector.NewGarbageCollector(
        metaOnlyClientPool,
        clientPool,
        dynamicClient,
        restMapper,
        deletableResources,
        garbagecollector.DefaultIgnoredResources(),
@ -249,6 +250,12 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
    }
    syncPeriod := 5 * time.Second
    startGC := func(workers int) {
        go wait.Until(func() {
            // Resetting the REST mapper will also invalidate the underlying discovery
            // client. This is a leaky abstraction and assumes behavior about the REST
            // mapper, but we'll deal with it for now.
            restMapper.Reset()
        }, syncPeriod, stopCh)
        go gc.Run(workers, stopCh)
        go gc.Sync(clientSet.Discovery(), syncPeriod, stopCh)
    }
@ -262,7 +269,7 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
        gc:                 gc,
        clientSet:          clientSet,
        apiExtensionClient: apiExtensionClient,
        clientPool:         clientPool,
        dynamicClient:      dynamicClient,
        startGC:            startGC,
        syncPeriod:         syncPeriod,
    }
@ -362,12 +369,12 @@ func TestCascadingDeletion(t *testing.T) {
    // sometimes the deletion of the RC takes long time to be observed by
    // the gc, so wait for the garbage collector to observe the deletion of
    // the toBeDeletedRC
    if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
        return !gc.GraphHasUID([]types.UID{toBeDeletedRC.ObjectMeta.UID}), nil
    }); err != nil {
        t.Fatal(err)
    }
    if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
    if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 1*time.Second, 30*time.Second); err != nil {
        t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
    }
    // checks the garbage collect doesn't delete pods it shouldn't delete.
@ -407,7 +414,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
        t.Fatalf("Unexpected pod list: %v", pods.Items)
    }
    // wait for the garbage collector to delete the pod
    if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
    if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 1*time.Second, 30*time.Second); err != nil {
        t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
    }
}
@ -441,7 +448,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
    // creation of the pods, otherwise if the deletion of RC is observed before
    // the creation of the pods, the pods will not be orphaned.
    if orphan {
        wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
        wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
    }
    // delete the rc
    if err := rcClient.Delete(rc.ObjectMeta.Name, options); err != nil {
@ -501,7 +508,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
    wg.Wait()
    t.Logf("all pods are created, all replications controllers are created then deleted")
    // wait for the RCs and Pods to reach the expected numbers.
    if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 300*time.Second, func() (bool, error) {
        podsInEachCollection := 3
        // see the comments on the calls to setupRCsPods for details
        remainingGroups := 3
@ -568,14 +575,14 @@ func TestOrphaning(t *testing.T) {
    // we need wait for the gc to observe the creation of the pods, otherwise if
    // the deletion of RC is observed before the creation of the pods, the pods
    // will not be orphaned.
    wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
    wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })

    err = rcClient.Delete(toBeDeletedRCName, getOrphanOptions())
    if err != nil {
        t.Fatalf("Failed to gracefully delete the rc: %v", err)
    }
    // verify the toBeDeleteRC is deleted
    if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
    if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
        rcs, err := rcClient.List(metav1.ListOptions{})
        if err != nil {
            return false, err
@ -639,7 +646,7 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) {
        t.Fatalf("Failed to delete the rc: %v", err)
    }
    // verify the toBeDeleteRC is deleted
    if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
    if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
        _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
        if err != nil {
            if errors.IsNotFound(err) {
@ -707,7 +714,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) {
        t.Fatalf("Failed to delete the rc: %v", err)
    }
    // verify the toBeDeleteRC is deleted
    if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
    if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
        _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
        if err != nil {
            if errors.IsNotFound(err) {
@ -797,11 +804,11 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
    ctx := setup(t, 5)
    defer ctx.tearDown()

    clientSet, apiExtensionClient, clientPool := ctx.clientSet, ctx.apiExtensionClient, ctx.clientPool
    clientSet, apiExtensionClient, dynamicClient := ctx.clientSet, ctx.apiExtensionClient, ctx.dynamicClient

    ns := createNamespaceOrDie("crd-cascading", clientSet, t)

    definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, clientPool, ns.Name)
    definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

    // Create a custom owner resource.
    owner := newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner"))
@ -829,7 +836,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
    }

    // Ensure the owner is deleted.
    if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
        _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{})
        return errors.IsNotFound(err), nil
    }); err != nil {
@ -857,13 +864,13 @@ func TestMixedRelationships(t *testing.T) {
    ctx := setup(t, 5)
    defer ctx.tearDown()

    clientSet, apiExtensionClient, clientPool := ctx.clientSet, ctx.apiExtensionClient, ctx.clientPool
    clientSet, apiExtensionClient, dynamicClient := ctx.clientSet, ctx.apiExtensionClient, ctx.dynamicClient

    ns := createNamespaceOrDie("crd-mixed", clientSet, t)

    configMapClient := clientSet.CoreV1().ConfigMaps(ns.Name)

    definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, clientPool, ns.Name)
    definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

    // Create a custom owner resource.
    customOwner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")))
@ -907,7 +914,7 @@ func TestMixedRelationships(t *testing.T) {
    }

    // Ensure the owner is deleted.
    if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
        _, err := resourceClient.Get(customOwner.GetName(), metav1.GetOptions{})
        return errors.IsNotFound(err), nil
    }); err != nil {
@ -931,7 +938,7 @@ func TestMixedRelationships(t *testing.T) {
    }

    // Ensure the owner is deleted.
    if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
        _, err := configMapClient.Get(coreOwner.GetName(), metav1.GetOptions{})
        return errors.IsNotFound(err), nil
    }); err != nil {
@ -955,13 +962,13 @@ func TestCRDDeletionCascading(t *testing.T) {
    ctx := setup(t, 5)
    defer ctx.tearDown()

    clientSet, apiExtensionClient, clientPool := ctx.clientSet, ctx.apiExtensionClient, ctx.clientPool
    clientSet, apiExtensionClient, dynamicClient := ctx.clientSet, ctx.apiExtensionClient, ctx.dynamicClient

    ns := createNamespaceOrDie("crd-mixed", clientSet, t)

    configMapClient := clientSet.CoreV1().ConfigMaps(ns.Name)

    definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, clientPool, ns.Name)
    definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

    // Create a custom owner resource.
    owner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")))
@ -987,7 +994,7 @@ func TestCRDDeletionCascading(t *testing.T) {
    }

    // Ensure the owner is deleted.
    if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
        _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{})
        return errors.IsNotFound(err), nil
    }); err != nil {
@ -995,7 +1002,7 @@ func TestCRDDeletionCascading(t *testing.T) {
    }

    // Ensure the dependent is deleted.
    if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
    if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
        _, err := configMapClient.Get(dependent.GetName(), metav1.GetOptions{})
        return errors.IsNotFound(err), nil
    }); err != nil {
70
vendor/k8s.io/kubernetes/test/integration/ipamperf/BUILD
generated
vendored
Normal file
@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "ipam_test.go",
        "main_test.go",
    ],
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//pkg/controller/nodeipam:go_default_library",
        "//pkg/controller/nodeipam/ipam:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/integration/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_library(
    name = "go_default_library",
    srcs = [
        "cloud.go",
        "results.go",
        "util.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/ipamperf",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
        "//pkg/controller/nodeipam/ipam:go_default_library",
        "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
        "//pkg/controller/util/node:go_default_library",
        "//test/integration/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)
85
vendor/k8s.io/kubernetes/test/integration/ipamperf/README.md
generated
vendored
Normal file
@ -0,0 +1,85 @@
IPAM Performance Test
=====

Motivation
-----
We wanted to be able to test the behavior of the IPAM controller under various scenarios,
by mocking and monitoring the edges that the controller interacts with. This has the following goals:

- Save time on testing
- To simulate various behaviors cheaply
- To observe and model the ideal behavior of the IPAM controller code

Currently the test runs through the 4 different IPAM controller modes for cases where the kube API QPS is a)
equal to and b) significantly less than the number of nodes being added, to observe and quantify behavior.

How to run
-------

```shell
# In kubernetes root path
make generated_files

cd test/integration/ipamperf
./test-performance.sh
```

The runner scripts support a few different options:

```shell
./test-performance.sh -h
usage: ./test-performance.sh [-h] [-d] [-r <pattern>] [-o <filename>]
usage: ./test-performance.sh <options>
 -h display this help message
 -d enable debug logs in tests
 -r <pattern> regex pattern to match for tests
 -o <filename> file to write JSON formatted results to
 -p <id> enable cpu and memory profiles, output written to mem-<id>.out and cpu-<id>.out
 -c enable custom test configuration
 -a <name> allocator name, one of RangeAllocator, CloudAllocator, IPAMFromCluster, IPAMFromCloud
 -k <num> api server qps for allocator
 -n <num> number of nodes to simulate
 -m <num> api server qps for node creation
 -l <num> gce cloud endpoint qps
```

The tests follow the pattern TestPerformance/{AllocatorType}-KubeQPS{X}-Nodes{Y}, where AllocatorType
is one of

- RangeAllocator
- IPAMFromCluster
- CloudAllocator
- IPAMFromCloud

and X represents the QPS configured for the kubernetes API client, and Y is the number of nodes to create.

The -d flag sets the -v level for glog to 6, enabling nearly all of the debug logs in the code.

So to run the test for CloudAllocator with 10 nodes, one can run

```shell
./test-performance.sh -r /CloudAllocator.*Nodes10$
```

At the end of the test, a JSON format of the results for all the tests run is printed. Passing the -o option
also saves this JSON to a named file; an example run is sketched below.
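For instance, a debug run that also saves the results might look like the following (the output file
name here is arbitrary, chosen only to illustrate combining the documented -d, -r and -o flags):

```shell
# run the RangeAllocator 10-node test with verbose logs, saving JSON results
./test-performance.sh -d -r /RangeAllocator.*Nodes10$ -o range-allocator-10.json
```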
### Profiling the code
It's possible to get the CPU and memory profiles of code during test execution by using the ```-p``` option.
The CPU and memory profiles are generated in the same directory with the file names set to ```cpu-<id>.out```
and ```mem-<id>.out```, where ```<id>``` is the argument value. The typical pattern is to put in the number
of nodes being simulated as the id, or 'all' when running the full suite. A sample invocation follows.
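For example, a profiled run of the full suite, using 'all' as the profile id as suggested above, could
look like this:

```shell
# writes cpu-all.out and mem-all.out in the current directory
./test-performance.sh -p all
```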
### Custom Test Configuration
It's also possible to run a custom test configuration by passing the -c option. With this option, it is then
possible to specify the number of nodes to simulate and the API server qps values for creation,
IPAM allocation and cloud endpoint, along with the allocator name to run. The default values for the
qps parameters are 30 for IPAM allocation, 100 for node creation and 30 for the cloud endpoint, and the
default allocator is the RangeAllocator. A sketch of a custom run follows.
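As an illustrative sketch (the flag values are arbitrary, chosen only to show the shape of a custom run
with the documented -c, -a, -n, -k, -m and -l options), a 100-node CloudAllocator run spelling out the
default qps values explicitly would be:

```shell
./test-performance.sh -c -a CloudAllocator -n 100 -k 30 -m 100 -l 30
```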
Code Organization
-----
The core of the tests is defined in [ipam_test.go](ipam_test.go), using the t.Run() helper to control parallelism,
as we want to be able to start the master only once. [cloud.go](cloud.go) contains the mock of the cloud server endpoint
and can be configured to behave differently as needed by the various modes. The tracking of the node behavior and
creation of the test results data is in [results.go](results.go).
154
vendor/k8s.io/kubernetes/test/integration/ipamperf/cloud.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
    "context"
    "net"
    "sync"

    beta "google.golang.org/api/compute/v0.beta"
    ga "google.golang.org/api/compute/v1"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
    "k8s.io/kubernetes/test/integration/util"
)

// implementation note:
// ------------------
// cloud.go implements hooks and handler functions for the MockGCE cloud in order to meet expectations
// of cloud behavior from the IPAM controllers. The key constraint is that the IPAM code is spread
// across both GA and Beta instances, which are distinct objects in the mock. We need to solve for
//
// 1. When a GET is called on an instance, we lazily create the instance with or without an assigned
//    ip alias as needed by the IPAM controller type
// 2. When we assign an IP alias for an instance, both the GA and Beta instance have to agree on the
//    assigned alias range
//
// We solve both problems by using a baseInstanceList which maintains a list of known instances,
// and their pre-assigned ip-alias ranges (if needed). We then create GetHook for GA and Beta GetInstance
// calls as closures over this baseInstanceList that can look up base instance data.
//
// This has the advantage that once the Get hook populates the GCEMock with the base data, we then let the
// rest of the mock code run as is.

// baseInstance tracks basic instance data needed by the IPAM controllers
type baseInstance struct {
    name       string
    zone       string
    aliasRange string
}

// baseInstanceList tracks a set of base instances
type baseInstanceList struct {
    allocateCIDR   bool
    clusterCIDR    *net.IPNet
    subnetMaskSize int
    cidrSet        *cidrset.CidrSet

    lock      sync.Mutex // protect access to instances
    instances map[meta.Key]*baseInstance
}

// toGA is a utility method to return the baseInstance data as a GA Instance object
func (bi *baseInstance) toGA() *ga.Instance {
    inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}
    if bi.aliasRange != "" {
        inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{
            {IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
        }
    }
    return inst
}

// toBeta is a utility method to return the baseInstance data as a beta Instance object
func (bi *baseInstance) toBeta() *beta.Instance {
    inst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}
    if bi.aliasRange != "" {
        inst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{
            {IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
        }
    }
    return inst
}

// newBaseInstanceList is the baseInstanceList constructor
func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {
    cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)
    return &baseInstanceList{
        allocateCIDR:   allocateCIDR,
        clusterCIDR:    clusterCIDR,
        subnetMaskSize: subnetMaskSize,
        cidrSet:        cidrSet,
        instances:      make(map[meta.Key]*baseInstance),
    }
}

// getOrCreateBaseInstance lazily creates a new base instance, assigning an alias range if allocateCIDR is true
func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {
    bil.lock.Lock()
    defer bil.lock.Unlock()

    inst, found := bil.instances[*key]
    if !found {
        inst = &baseInstance{name: key.Name, zone: key.Zone}
        if bil.allocateCIDR {
            nextRange, _ := bil.cidrSet.AllocateNext()
            inst.aliasRange = nextRange.String()
        }
        bil.instances[*key] = inst
    }
    return inst
}

// newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook
func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
    return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
        m.Lock.Lock()
        defer m.Lock.Unlock()

        if _, found := m.Objects[*key]; !found {
            m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}
        }
        return false, nil, nil
    }
}

// newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook
func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
    return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
        m.Lock.Lock()
        defer m.Lock.Unlock()

        if _, found := m.Objects[*key]; !found {
            m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}
        }
        return false, nil, nil
    }
}

// newMockCloud returns a mock GCE instance with the appropriate handler hooks
func (bil *baseInstanceList) newMockCloud() cloud.Cloud {
    c := cloud.NewMockGCE(nil)

    // insert hooks to lazily create an instance when needed
    c.MockInstances.GetHook = bil.newGAGetHook()
    c.MockBetaInstances.GetHook = bil.newBetaGetHook()

    return c
}
158
vendor/k8s.io/kubernetes/test/integration/ipamperf/ipam_test.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net"
    "os"
    "testing"
    "time"

    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/controller/nodeipam"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
    "k8s.io/kubernetes/test/integration/util"
)

func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*clientset.Clientset, util.ShutdownFunc, error) {
    controllerStopChan := make(chan struct{})
    shutdownFunc := func() {
        close(controllerStopChan)
    }

    clientSet := clientset.NewForConfigOrDie(&restclient.Config{
        Host:          apiURL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
        QPS:           float32(config.KubeQPS),
        Burst:         config.KubeQPS,
    })

    sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
    ipamController, err := nodeipam.NewNodeIpamController(
        sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet,
        clusterCIDR, serviceCIDR, subnetMaskSize, config.AllocatorType,
    )
    if err != nil {
        return nil, shutdownFunc, err
    }
    go ipamController.Run(controllerStopChan)
    sharedInformer.Start(controllerStopChan)

    return clientSet, shutdownFunc, nil
}

func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*Results, error) {
    t.Helper()
    glog.Infof("Running test %s", t.Name())

    defer deleteNodes(apiURL, config) // clean up nodes after controller shutdown

    clientSet, shutdownFunc, err := setupAllocator(apiURL, config, clusterCIDR, serviceCIDR, subnetMaskSize)
    if err != nil {
        t.Fatalf("Error starting IPAM allocator: %v", err)
    }
    defer shutdownFunc()

    o := NewObserver(clientSet, config.NumNodes)
    if err := o.StartObserving(); err != nil {
        t.Fatalf("Could not start test observer: %v", err)
    }

    if err := createNodes(apiURL, config); err != nil {
        t.Fatalf("Could not create nodes: %v", err)
    }

    results := o.Results(t.Name(), config)
    glog.Infof("Results: %s", results)
    if !results.Succeeded {
        t.Errorf("%s: Not all allocations succeeded", t.Name())
    }
    return results, nil
}

func logResults(allResults []*Results) {
    jStr, err := json.MarshalIndent(allResults, "", "  ")
    if err != nil {
        glog.Errorf("Error formatting results: %v", err)
        return
    }
    if resultsLogFile != "" {
        glog.Infof("Logging results to %s", resultsLogFile)
        if err := ioutil.WriteFile(resultsLogFile, jStr, os.FileMode(0644)); err != nil {
            glog.Errorf("Error logging results to %s: %v", resultsLogFile, err)
        }
    }
    glog.Infof("AllResults:\n%s", string(jStr))
}

func TestPerformance(t *testing.T) {
    if testing.Short() {
        // TODO (#61854) find why flakiness is caused by etcd connectivity before enabling always
        t.Skip("Skipping because we want to run short tests")
    }

    apiURL, masterShutdown := util.StartApiserver()
    defer masterShutdown()

    _, clusterCIDR, _ := net.ParseCIDR("10.96.0.0/11") // allows up to 8K nodes
    _, serviceCIDR, _ := net.ParseCIDR("10.94.0.0/24") // does not matter for test - pick up to 250 services
    subnetMaskSize := 24

    var (
        allResults []*Results
        tests      []*Config
    )

    if isCustom {
        tests = append(tests, customConfig)
    } else {
        for _, numNodes := range []int{10, 100} {
            for _, alloc := range []ipam.CIDRAllocatorType{ipam.RangeAllocatorType, ipam.CloudAllocatorType, ipam.IPAMFromClusterAllocatorType, ipam.IPAMFromCloudAllocatorType} {
                tests = append(tests, &Config{AllocatorType: alloc, NumNodes: numNodes, CreateQPS: numNodes, KubeQPS: 10, CloudQPS: 10})
            }
        }
    }

    for _, test := range tests {
        testName := fmt.Sprintf("%s-KubeQPS%d-Nodes%d", test.AllocatorType, test.KubeQPS, test.NumNodes)
        t.Run(testName, func(t *testing.T) {
            allocateCIDR := false
            if test.AllocatorType == ipam.IPAMFromCloudAllocatorType || test.AllocatorType == ipam.CloudAllocatorType {
                allocateCIDR = true
            }
            bil := newBaseInstanceList(allocateCIDR, clusterCIDR, subnetMaskSize)
            cloud, err := util.NewMockGCECloud(bil.newMockCloud())
            if err != nil {
                t.Fatalf("Unable to create mock cloud: %v", err)
            }
            test.Cloud = cloud
            if results, err := runTest(t, apiURL, test, clusterCIDR, serviceCIDR, subnetMaskSize); err == nil {
                allResults = append(allResults, results)
            }
        })
    }

    logResults(allResults)
}
66
vendor/k8s.io/kubernetes/test/integration/ipamperf/main_test.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
    "flag"
    "testing"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
    "k8s.io/kubernetes/test/integration/framework"
)

var (
    resultsLogFile string
    isCustom       bool
    customConfig   = &Config{
        NumNodes:      10,
        KubeQPS:       30,
        CloudQPS:      30,
        CreateQPS:     100,
        AllocatorType: ipam.RangeAllocatorType,
    }
)

func TestMain(m *testing.M) {
    allocator := string(ipam.RangeAllocatorType)

    flag.StringVar(&resultsLogFile, "log", "", "log file to write JSON results to")
    flag.BoolVar(&isCustom, "custom", false, "enable custom test configuration")
    flag.StringVar(&allocator, "allocator", allocator, "allocator to use")
    flag.IntVar(&customConfig.KubeQPS, "kube-qps", customConfig.KubeQPS, "API server qps for allocations")
    flag.IntVar(&customConfig.NumNodes, "num-nodes", 10, "number of nodes to simulate")
    flag.IntVar(&customConfig.CreateQPS, "create-qps", customConfig.CreateQPS, "API server qps for node creation")
    flag.IntVar(&customConfig.CloudQPS, "cloud-qps", customConfig.CloudQPS, "GCE Cloud qps limit")
    flag.Parse()

    switch allocator {
    case string(ipam.RangeAllocatorType):
        customConfig.AllocatorType = ipam.RangeAllocatorType
    case string(ipam.CloudAllocatorType):
        customConfig.AllocatorType = ipam.CloudAllocatorType
    case string(ipam.IPAMFromCloudAllocatorType):
        customConfig.AllocatorType = ipam.IPAMFromCloudAllocatorType
    case string(ipam.IPAMFromClusterAllocatorType):
        customConfig.AllocatorType = ipam.IPAMFromClusterAllocatorType
    default:
        glog.Fatalf("Unknown allocator type: %s", allocator)
    }

    framework.EtcdMain(m.Run)
}
221
vendor/k8s.io/kubernetes/test/integration/ipamperf/results.go
generated
vendored
Normal file
@ -0,0 +1,221 @@
|
||||
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
    "bytes"
    "fmt"
    "sort"
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
    nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)

// Config represents the test configuration that is being run
type Config struct {
    CreateQPS     int                     // rate at which nodes are created
    KubeQPS       int                     // rate for communication with kubernetes API
    CloudQPS      int                     // rate for communication with cloud endpoint
    NumNodes      int                     // number of nodes to create and monitor
    AllocatorType ipam.CIDRAllocatorType  // type of allocator to run
    Cloud         cloudprovider.Interface // cloud provider
}

type nodeTime struct {
    added     time.Time // observed time for when node was added
    allocated time.Time // observed time for when node was assigned podCIDR
    podCIDR   string    // the allocated podCIDR range
}

// Observer represents the handle to the test observer that watches for node changes
// and tracks behavior
type Observer struct {
    numAdded     int                  // number of nodes observed added
    numAllocated int                  // number of nodes observed allocated podCIDR
    timing       map[string]*nodeTime // per node timing
    numNodes     int                  // the number of nodes to expect
    stopChan     chan struct{}        // for the shared informer
    wg           sync.WaitGroup
    clientSet    *clientset.Clientset
}

// JSONDuration is an alias of time.Duration to support custom Marshal code
type JSONDuration time.Duration

// NodeDuration represents the CIDR allocation time for each node
type NodeDuration struct {
    Name     string       // node name
    PodCIDR  string       // the podCIDR that was assigned to the node
    Duration JSONDuration // how long it took to assign podCIDR
}

// Results represents the observed test results.
type Results struct {
    Name           string         // name for the test
    Config         *Config        // handle to the test config
    Succeeded      bool           // whether all nodes were assigned podCIDR
    MaxAllocTime   JSONDuration   // the maximum time taken for assignment per node
    TotalAllocTime JSONDuration   // duration between first addition and last assignment
    NodeAllocTime  []NodeDuration // assignment time by node name
}

// NewObserver creates a new observer given a handle to the Clientset
func NewObserver(clientSet *clientset.Clientset, numNodes int) *Observer {
    o := &Observer{
        timing:    map[string]*nodeTime{},
        numNodes:  numNodes,
        clientSet: clientSet,
        stopChan:  make(chan struct{}),
    }
    return o
}

// StartObserving starts an asynchronous loop to monitor for node changes.
// Call Results() to get the test results after starting observer.
func (o *Observer) StartObserving() error {
    o.monitor()
    glog.Infof("Test observer started")
    return nil
}

// Results returns the test results. It waits for the observer to finish
// and returns the computed results of the observations.
func (o *Observer) Results(name string, config *Config) *Results {
    var (
        firstAdd       time.Time // earliest time any node was added (first node add)
        lastAssignment time.Time // latest time any node was assigned CIDR (last node assignment)
    )
    o.wg.Wait()
    close(o.stopChan) // shutdown the shared informer

    results := &Results{
        Name:          name,
        Config:        config,
        Succeeded:     o.numAdded == o.numNodes && o.numAllocated == o.numNodes,
        MaxAllocTime:  0,
        NodeAllocTime: []NodeDuration{},
    }
    for name, nTime := range o.timing {
        addFound := !nTime.added.IsZero()
        if addFound && (firstAdd.IsZero() || nTime.added.Before(firstAdd)) {
            firstAdd = nTime.added
        }
        cidrFound := !nTime.allocated.IsZero()
        if cidrFound && nTime.allocated.After(lastAssignment) {
            lastAssignment = nTime.allocated
        }
        if addFound && cidrFound {
            allocTime := nTime.allocated.Sub(nTime.added)
            if allocTime > time.Duration(results.MaxAllocTime) {
                results.MaxAllocTime = JSONDuration(allocTime)
            }
            results.NodeAllocTime = append(results.NodeAllocTime, NodeDuration{
                Name: name, PodCIDR: nTime.podCIDR, Duration: JSONDuration(allocTime),
            })
        }
    }
    results.TotalAllocTime = JSONDuration(lastAssignment.Sub(firstAdd))
    sort.Slice(results.NodeAllocTime, func(i, j int) bool {
        return results.NodeAllocTime[i].Duration > results.NodeAllocTime[j].Duration
    })
    return results
}

func (o *Observer) monitor() {
    o.wg.Add(1)

    sharedInformer := informers.NewSharedInformerFactory(o.clientSet, 1*time.Second)
    nodeInformer := sharedInformer.Core().V1().Nodes().Informer()

    nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) (err error) {
            name := node.GetName()
            if node.Spec.PodCIDR != "" {
                // ignore nodes that already have a PodCIDR (they may be held over from previous runs that did not get cleaned up)
                return
            }
            nTime := &nodeTime{}
            o.timing[name] = nTime
            nTime.added = time.Now()
            o.numAdded = o.numAdded + 1
            return
        }),
        UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) (err error) {
            name := newNode.GetName()
            nTime, found := o.timing[name]
            if !found {
                return // consistency check - ignore nodes we have not seen the add event for
            }
            // check if CIDR assigned and ignore redundant updates
            if newNode.Spec.PodCIDR != "" && nTime.podCIDR == "" {
                nTime.allocated = time.Now()
                nTime.podCIDR = newNode.Spec.PodCIDR
                o.numAllocated++
                if o.numAllocated%10 == 0 {
                    glog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes))
                }
                // do the following check only when numAllocated changes; otherwise redundant
                // updates could cause wg.Done() to be called multiple times, causing a panic
                if o.numAdded == o.numNodes && o.numAllocated == o.numNodes {
                    glog.Info("All nodes assigned podCIDR")
                    o.wg.Done()
                }
            }
            return
        }),
    })
    sharedInformer.Start(o.stopChan)
}

// String implements the Stringer interface and returns a multi-line representation
// of the test results.
func (results *Results) String() string {
    var b bytes.Buffer
    fmt.Fprintf(&b, "\n TestName: %s", results.Name)
    fmt.Fprintf(&b, "\n NumNodes: %d, CreateQPS: %d, KubeQPS: %d, CloudQPS: %d, Allocator: %v",
        results.Config.NumNodes, results.Config.CreateQPS, results.Config.KubeQPS,
        results.Config.CloudQPS, results.Config.AllocatorType)
    fmt.Fprintf(&b, "\n Succeeded: %v, TotalAllocTime: %v, MaxAllocTime: %v",
        results.Succeeded, time.Duration(results.TotalAllocTime), time.Duration(results.MaxAllocTime))
    fmt.Fprintf(&b, "\n %5s %-20s %-20s %s", "Num", "Node", "PodCIDR", "Duration (s)")
    for i, d := range results.NodeAllocTime {
        fmt.Fprintf(&b, "\n %5d %-20s %-20s %10.3f", i+1, d.Name, d.PodCIDR, time.Duration(d.Duration).Seconds())
    }
    return b.String()
}

// MarshalJSON implements the json.Marshaler interface
func (jDuration *JSONDuration) MarshalJSON() ([]byte, error) {
    return []byte(fmt.Sprintf("\"%s\"", time.Duration(*jDuration).String())), nil
}

// UnmarshalJSON implements the json.Unmarshaler interface
func (jDuration *JSONDuration) UnmarshalJSON(b []byte) (err error) {
    var d time.Duration
    if d, err = time.ParseDuration(string(b[1 : len(b)-1])); err == nil {
        *jDuration = JSONDuration(d)
    }
    return
}
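Editorial note: JSONDuration exists so durations serialize as human-readable strings ("1.5s") rather than nanosecond integers, keeping the JSON results files readable. A minimal, self-contained round-trip sketch of the same technique (standalone program; the names mirror but are not the vendored type):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// JSONDuration mirrors the type above: a time.Duration that marshals
// as its String() form instead of an integer nanosecond count.
type JSONDuration time.Duration

func (d *JSONDuration) MarshalJSON() ([]byte, error) {
    return []byte(fmt.Sprintf("%q", time.Duration(*d).String())), nil
}

func (d *JSONDuration) UnmarshalJSON(b []byte) error {
    // Strip the surrounding quotes, then parse the duration string.
    parsed, err := time.ParseDuration(string(b[1 : len(b)-1]))
    if err == nil {
        *d = JSONDuration(parsed)
    }
    return err
}

func main() {
    in := JSONDuration(1500 * time.Millisecond)
    out, _ := json.Marshal(&in) // produces "1.5s"
    var back JSONDuration
    _ = json.Unmarshal(out, &back)
    fmt.Println(string(out), time.Duration(back)) // "1.5s" 1.5s
}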
91
vendor/k8s.io/kubernetes/test/integration/ipamperf/test-performance.sh
generated
vendored
Executable file
@ -0,0 +1,91 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

TEST_ARGS=""
RUN_PATTERN=".*"
PROFILE_OPTS=""

function usage() {
    echo "usage: $0 <options>"
    echo " -h display this help message"
    echo " -d enable debug logs in tests"
    echo " -r <pattern> regex pattern to match for tests"
    echo " -o <filename> file to write JSON formatted results to"
    echo " -p <id> enable cpu and memory profiles, output written to mem-<id>.out and cpu-<id>.out"
    echo " -c enable custom test configuration"
    echo " -a <name> allocator name, one of RangeAllocator, CloudAllocator, IPAMFromCluster, IPAMFromCloud"
    echo " -k <num> api server qps for allocator"
    echo " -n <num> number of nodes to simulate"
    echo " -m <num> api server qps for node creation"
    echo " -l <num> gce cloud endpoint qps"
    exit 1
}

while getopts ":hdr:o:p:ca:k:n:m:l:" opt; do
    case ${opt} in
        d) TEST_ARGS="${TEST_ARGS} -v=6"
            ;;
        r) RUN_PATTERN="${OPTARG}"
            ;;
        o) TEST_ARGS="${TEST_ARGS} -log ${OPTARG}"
            ;;
        p) PROFILE_OPTS="-memprofile mem-${OPTARG}.out -cpuprofile cpu-${OPTARG}.out"
            ;;
        c) TEST_ARGS="${TEST_ARGS} -custom"
            ;;
        a) TEST_ARGS="${TEST_ARGS} -allocator ${OPTARG}"
            ;;
        k) TEST_ARGS="${TEST_ARGS} -kube-qps ${OPTARG}"
            ;;
        n) TEST_ARGS="${TEST_ARGS} -num-nodes ${OPTARG}"
            ;;
        m) TEST_ARGS="${TEST_ARGS} -create-qps ${OPTARG}"
            ;;
        l) TEST_ARGS="${TEST_ARGS} -cloud-qps ${OPTARG}"
            ;;
        h) usage
            ;;
        \?) usage
            ;;
    esac
done

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
source "${KUBE_ROOT}/hack/lib/init.sh"

kube::golang::setup_env

DIR_BASENAME=$(dirname "${BASH_SOURCE}")
pushd ${DIR_BASENAME}

cleanup() {
    popd 2> /dev/null
    kube::etcd::cleanup
    kube::log::status "performance test cleanup complete"
}

trap cleanup EXIT

kube::etcd::start

# Running IPAM tests. It might take a long time.
kube::log::status "performance test (IPAM) start"
go test ${PROFILE_OPTS} -test.run=${RUN_PATTERN} -test.timeout=60m -test.short=false -v -args ${TEST_ARGS}
kube::log::status "... IPAM tests finished."
95
vendor/k8s.io/kubernetes/test/integration/ipamperf/util.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
)

const (
    maxCreateRetries = 10
    retryDelay       = 10 * time.Second
)

var (
    baseNodeTemplate = &v1.Node{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "sample-node-",
        },
        Status: v1.NodeStatus{
            Capacity: v1.ResourceList{
                v1.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
                v1.ResourceCPU:    resource.MustParse("4"),
                v1.ResourceMemory: resource.MustParse("32Gi"),
            },
            Phase: v1.NodeRunning,
            Conditions: []v1.NodeCondition{
                {Type: v1.NodeReady, Status: v1.ConditionTrue},
            },
        },
    }
)

func deleteNodes(apiURL string, config *Config) {
    glog.Info("Deleting nodes")
    clientSet := clientset.NewForConfigOrDie(&restclient.Config{
        Host:          apiURL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
        QPS:           float32(config.CreateQPS),
        Burst:         config.CreateQPS,
    })
    noGrace := int64(0)
    if err := clientSet.CoreV1().Nodes().DeleteCollection(&metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil {
        glog.Errorf("Error deleting nodes: %v", err)
    }
}

func createNodes(apiURL string, config *Config) error {
    clientSet := clientset.NewForConfigOrDie(&restclient.Config{
        Host:          apiURL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
        QPS:           float32(config.CreateQPS),
        Burst:         config.CreateQPS,
    })
    glog.Infof("Creating %d nodes", config.NumNodes)
    for i := 0; i < config.NumNodes; i++ {
        var err error
        for j := 0; j < maxCreateRetries; j++ {
            if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) {
                glog.Infof("Server timeout creating nodes, retrying after %v", retryDelay)
                time.Sleep(retryDelay)
                continue
            }
            break
        }
        if err != nil {
            glog.Errorf("Error creating nodes: %v", err)
            return err
        }
    }
    glog.Infof("%d nodes created", config.NumNodes)
    return nil
}
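Editorial note: createNodes retries IsServerTimeout errors with a fixed 10s sleep. The apimachinery wait package offers jittered exponential backoff, which spreads retries out under load. A hypothetical variant of the same loop, assuming this package's clientset import, baseNodeTemplate, and maxCreateRetries as defined above (a sketch, not the vendored implementation):

// createNodeWithBackoff is an illustrative alternative to the fixed-delay
// retry loop above; it requires "k8s.io/apimachinery/pkg/util/wait" in
// addition to the imports already present in this file.
func createNodeWithBackoff(clientSet *clientset.Clientset) error {
    backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Jitter: 0.1, Steps: maxCreateRetries}
    return wait.ExponentialBackoff(backoff, func() (bool, error) {
        if _, err := clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil {
            if errors.IsServerTimeout(err) {
                return false, nil // retryable: back off and try again
            }
            return false, err // non-retryable: abort with the error
        }
        return true, nil // created successfully
    })
}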
175
vendor/k8s.io/kubernetes/test/integration/master/BUILD
generated
vendored
@ -11,12 +11,43 @@ go_test(
size = "large",
|
||||
srcs = [
|
||||
"crd_test.go",
|
||||
"kms_transformation_test.go",
|
||||
"kube_apiserver_test.go",
|
||||
"main_test.go",
|
||||
"secrets_transformation_test.go",
|
||||
"synthetic_master_test.go",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"kms_transformation_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
embed = [":go_default_library"],
|
||||
tags = ["integration"],
|
||||
deps = [
|
||||
@ -29,6 +60,7 @@ go_test(
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/networking/v1:go_default_library",
|
||||
@ -46,10 +78,10 @@ go_test(
|
||||
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/features:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
|
||||
@ -58,7 +90,39 @@ go_test(
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
filegroup(
|
||||
@ -77,9 +141,40 @@ filegroup(
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"kms_plugin_mock.go",
|
||||
"transformation_testcase.go",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"kms_plugin_mock.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/test/integration/master",
|
||||
deps = [
|
||||
"//cmd/kube-apiserver/app/testing:go_default_library",
|
||||
@ -87,14 +182,74 @@ go_library(
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
22
vendor/k8s.io/kubernetes/test/integration/master/crd_test.go
generated
vendored
@ -41,7 +41,7 @@ import (
)

func TestCRDShadowGroup(t *testing.T) {
    result := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
    result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
    defer result.TearDownFn()

    kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
@ -109,7 +109,7 @@ func TestCRDShadowGroup(t *testing.T) {
func TestCRD(t *testing.T) {
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.Initializers, true)()

    result := kubeapiservertesting.StartTestServerOrDie(t, []string{"--admission-control", "Initializers"}, framework.SharedEtcd())
    result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--admission-control", "Initializers"}, framework.SharedEtcd())
    defer result.TearDownFn()

    kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
@ -150,14 +150,12 @@ func TestCRD(t *testing.T) {
    }

    t.Logf("Trying to access foos.cr.bar.com with dynamic client")
    barComConfig := *result.ClientConfig
    barComConfig.GroupVersion = &schema.GroupVersion{Group: "cr.bar.com", Version: "v1"}
    barComConfig.APIPath = "/apis"
    barComClient, err := dynamic.NewClient(&barComConfig)
    dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
    if err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    _, err = barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").List(metav1.ListOptions{})
    fooResource := schema.GroupVersionResource{Group: "cr.bar.com", Version: "v1", Resource: "foos"}
    _, err = dynamicClient.Resource(fooResource).Namespace("default").List(metav1.ListOptions{})
    if err != nil {
        t.Errorf("Failed to list foos.cr.bar.com instances: %v", err)
    }
@ -201,7 +199,7 @@ func TestCRD(t *testing.T) {
    }
    createErr := make(chan error, 1)
    go func() {
        _, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Create(unstructuredFoo)
        _, err := dynamicClient.Resource(fooResource).Namespace("default").Create(unstructuredFoo)
        t.Logf("Foo instance create returned: %v", err)
        if err != nil {
            createErr <- err
@ -216,7 +214,7 @@ func TestCRD(t *testing.T) {
    }

    t.Logf("Checking that Foo instance is visible with IncludeUninitialized=true")
    _, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Get(foo.ObjectMeta.Name, metav1.GetOptions{
    _, err := dynamicClient.Resource(fooResource).Namespace("default").Get(foo.ObjectMeta.Name, metav1.GetOptions{
        IncludeUninitialized: true,
    })
    switch {
@ -237,7 +235,7 @@ func TestCRD(t *testing.T) {
    for i := 0; i < 10; i++ {
        // would love to replace the following with a patch, but removing strings from the initializer array
        // is not what JSON (Merge) patch authors had in mind.
        fooUnstructured, err := barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Get(foo.ObjectMeta.Name, metav1.GetOptions{
        fooUnstructured, err := dynamicClient.Resource(fooResource).Namespace("default").Get(foo.ObjectMeta.Name, metav1.GetOptions{
            IncludeUninitialized: true,
        })
        if err != nil {
@ -274,7 +272,7 @@ func TestCRD(t *testing.T) {
    }
    fooUnstructured.UnmarshalJSON(bs)

    _, err = barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").Update(fooUnstructured)
    _, err = dynamicClient.Resource(fooResource).Namespace("default").Update(fooUnstructured)
    if err != nil && !errors.IsConflict(err) {
        t.Fatalf("Failed to update Foo instance: %v", err)
    } else if err == nil {
@ -287,7 +285,7 @@ func TestCRD(t *testing.T) {
    }

    t.Logf("Checking that Foo instance is visible after removing the initializer")
    if _, err := dynamicClient.Resource(fooResource).Namespace("default").Get(foo.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
        t.Errorf("Unexpected error: %v", err)
    }
}
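Editorial note: the migration in this diff — from a per-group dynamic client config to a single dynamic.NewForConfig client addressed by a GroupVersionResource — generalizes to any custom resource. A minimal sketch of the new pattern (the helper function and its *restclient.Config parameter are hypothetical; the gvr values mirror the test above):

// listFoos illustrates the dynamic-client pattern used in TestCRD.
// cfg is assumed to be obtained elsewhere (e.g. via clientcmd).
import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    restclient "k8s.io/client-go/rest"
)

func listFoos(cfg *restclient.Config) error {
    client, err := dynamic.NewForConfig(cfg)
    if err != nil {
        return err
    }
    gvr := schema.GroupVersionResource{Group: "cr.bar.com", Version: "v1", Resource: "foos"}
    // Namespace-scoped list, mirroring the call in the test.
    _, err = client.Resource(gvr).Namespace("default").List(metav1.ListOptions{})
    return err
}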
10
vendor/k8s.io/kubernetes/test/integration/master/kms_plugin_mock.go
generated
vendored
@ -1,3 +1,5 @@
// +build !windows

/*
Copyright 2017 The Kubernetes Authors.

@ -26,6 +28,7 @@ import (
    "golang.org/x/sys/unix"
    "google.golang.org/grpc"

    "github.com/golang/glog"
    kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1"
)

@ -43,7 +46,6 @@ type base64Plugin struct {

    // Allow users of the plugin to sense requests that were passed to KMS.
    encryptRequest chan *kmsapi.EncryptRequest
    decryptRequest chan *kmsapi.DecryptRequest
}

func NewBase64Plugin() (*base64Plugin, error) {
@ -55,6 +57,7 @@ func NewBase64Plugin() (*base64Plugin, error) {
    if err != nil {
        return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err)
    }
    glog.Infof("Listening on %s", sockFile)

    server := grpc.NewServer()

@ -62,7 +65,6 @@ func NewBase64Plugin() (*base64Plugin, error) {
        grpcServer:     server,
        listener:       listener,
        encryptRequest: make(chan *kmsapi.EncryptRequest, 1),
        decryptRequest: make(chan *kmsapi.DecryptRequest, 1),
    }

    kmsapi.RegisterKeyManagementServiceServer(server, result)
@ -83,7 +85,8 @@ func (s *base64Plugin) Version(ctx context.Context, request *kmsapi.VersionReque
}

func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) {
    s.decryptRequest <- request
    glog.Infof("Received Decrypt Request for DEK: %s", string(request.Cipher))

    buf := make([]byte, base64.StdEncoding.DecodedLen(len(request.Cipher)))
    n, err := base64.StdEncoding.Decode(buf, request.Cipher)
    if err != nil {
@ -94,6 +97,7 @@ func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptReque
}

func (s *base64Plugin) Encrypt(ctx context.Context, request *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) {
    glog.Infof("Received Encrypt Request for DEK: %x", request.Plain)
    s.encryptRequest <- request

    buf := make([]byte, base64.StdEncoding.EncodedLen(len(request.Plain)))
26
vendor/k8s.io/kubernetes/test/integration/master/kms_transformation_test.go
generated
vendored
@ -1,3 +1,5 @@
// +build !windows

/*
Copyright 2017 The Kubernetes Authors.

@ -24,7 +26,6 @@ import (
    "fmt"
    "strings"
    "testing"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/storage/value"
@ -84,7 +85,10 @@ func TestKMSProvider(t *testing.T) {
        t.Fatalf("failed to create mock of KMS Plugin: %v", err)
    }
    defer pluginMock.cleanUp()
    go pluginMock.grpcServer.Serve(pluginMock.listener)
    serveErr := make(chan error, 1)
    go func() {
        serveErr <- pluginMock.grpcServer.Serve(pluginMock.listener)
    }()

    test, err := newTransformTest(t, kmsConfigYAML)
    if err != nil {
@ -92,6 +96,11 @@ func TestKMSProvider(t *testing.T) {
    }
    defer test.cleanUp()

    // As part of newTransformTest a new secret was created, so the KMS mock should have been exercised by this point.
    if len(serveErr) != 0 {
        t.Fatalf("KMSPlugin failed while serving requests: %v", <-serveErr)
    }

    secretETCDPath := test.getETCDPath()
    var rawSecretAsSeenByETCD rawDEKKEKSecret
    rawSecretAsSeenByETCD, err = test.getRawSecretFromETCD()
@ -135,15 +144,18 @@ func TestKMSProvider(t *testing.T) {
    if secretVal != string(s.Data[secretKey]) {
        t.Fatalf("expected %s from KubeAPI, but got %s", secretVal, string(s.Data[secretKey]))
    }
    test.printMetrics()
}

func getDEKFromKMSPlugin(pluginMock *base64Plugin) ([]byte, error) {
    select {
    case e := <-pluginMock.encryptRequest:
        return e.Plain, nil
    case <-time.After(1 * time.Microsecond):
        return nil, fmt.Errorf("timed-out while getting encryption request from KMS Plugin Mock")
    // We expect KMS to already have seen an encryptRequest, so this receive should complete immediately.
    e, ok := <-pluginMock.encryptRequest

    if !ok {
        return nil, fmt.Errorf("failed to sense encryptRequest from KMS Plugin Mock")
    }

    return e.Plain, nil
}

func decryptPayload(key []byte, secret rawDEKKEKSecret, secretETCDPath string) ([]byte, error) {
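Editorial note: one caveat on the new getDEKFromKMSPlugin: e, ok := <-ch still blocks while the buffer is empty (ok only reports whether the channel was closed); the call works here because an encryptRequest is expected to be buffered already. A receive that genuinely cannot block needs a select with a default arm, as in this illustrative sketch (the helper name is hypothetical; kmsapi and fmt are assumed imported as in the files above):

// tryGetDEK performs a truly non-blocking receive: it returns immediately
// with an error when no encrypt request has been buffered yet.
func tryGetDEK(encryptRequest chan *kmsapi.EncryptRequest) ([]byte, error) {
    select {
    case e := <-encryptRequest:
        return e.Plain, nil
    default:
        return nil, fmt.Errorf("no encryptRequest buffered in KMS Plugin Mock")
    }
}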
116
vendor/k8s.io/kubernetes/test/integration/master/kube_apiserver_test.go
generated
vendored
@ -18,13 +18,18 @@ package master

import (
    "encoding/json"
    "fmt"
    "reflect"
    "strings"
    "testing"
    "time"

    appsv1beta1 "k8s.io/api/apps/v1beta1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apiserver/pkg/registry/generic/registry"
    "k8s.io/client-go/kubernetes"
    "k8s.io/kube-aggregator/pkg/apis/apiregistration"
    kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
@ -32,7 +37,7 @@ import (
)

func TestRun(t *testing.T) {
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
    defer server.TearDownFn()

    client, err := kubernetes.NewForConfig(server.ClientConfig)
@ -82,7 +87,7 @@ func TestRun(t *testing.T) {
// apiextensions-server and the kube-aggregator server, both part of
// the delegation chain in kube-apiserver.
func TestOpenAPIDelegationChainPlumbing(t *testing.T) {
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
    defer server.TearDownFn()

    kubeclient, err := kubernetes.NewForConfig(server.ClientConfig)
@ -138,3 +143,110 @@ func TestOpenAPIDelegationChainPlumbing(t *testing.T) {
        t.Errorf("missing path: %q", registrationPrefix)
    }
}

// return the unique endpoint IPs
func getEndpointIPs(endpoints *corev1.Endpoints) []string {
    endpointMap := make(map[string]bool)
    ips := make([]string, 0)
    for _, subset := range endpoints.Subsets {
        for _, address := range subset.Addresses {
            if _, ok := endpointMap[address.IP]; !ok {
                endpointMap[address.IP] = true
                ips = append(ips, address.IP)
            }
        }
    }
    return ips
}

func verifyEndpointsWithIPs(servers []*kubeapiservertesting.TestServer, ips []string) bool {
    listenAddresses := make([]string, 0)
    for _, server := range servers {
        listenAddresses = append(listenAddresses, server.ServerOpts.GenericServerRunOptions.AdvertiseAddress.String())
    }
    return reflect.DeepEqual(listenAddresses, ips)
}

func testReconcilersMasterLease(t *testing.T, leaseCount int, masterCount int) {
    var leaseServers []*kubeapiservertesting.TestServer
    var masterCountServers []*kubeapiservertesting.TestServer
    etcd := framework.SharedEtcd()

    instanceOptions := &kubeapiservertesting.TestServerInstanceOptions{
        DisableStorageCleanup: true,
    }

    // cleanup the registry storage
    defer registry.CleanupStorage()

    // 1. start masterCount api servers
    for i := 0; i < masterCount; i++ {
        // start master count api server
        server := kubeapiservertesting.StartTestServerOrDie(t, instanceOptions, []string{
            "--endpoint-reconciler-type", "master-count",
            "--advertise-address", fmt.Sprintf("10.0.1.%v", i+1),
            "--apiserver-count", fmt.Sprintf("%v", masterCount),
        }, etcd)
        masterCountServers = append(masterCountServers, server)
    }

    // 2. verify master count servers have registered
    if err := wait.PollImmediate(3*time.Second, 2*time.Minute, func() (bool, error) {
        client, err := kubernetes.NewForConfig(masterCountServers[0].ClientConfig)
        if err != nil {
            t.Logf("create client error: %v", err)
            return false, nil
        }
        endpoints, err := client.CoreV1().Endpoints("default").Get("kubernetes", metav1.GetOptions{})
        if err != nil {
            t.Logf("error fetching endpoints: %v", err)
            return false, nil
        }
        return verifyEndpointsWithIPs(masterCountServers, getEndpointIPs(endpoints)), nil
    }); err != nil {
        t.Fatalf("master count endpoints failed to register: %v", err)
    }

    // 3. start lease api servers
    for i := 0; i < leaseCount; i++ {
        options := []string{
            "--endpoint-reconciler-type", "lease",
            "--advertise-address", fmt.Sprintf("10.0.1.%v", i+10),
        }
        server := kubeapiservertesting.StartTestServerOrDie(t, instanceOptions, options, etcd)
        defer server.TearDownFn()
        leaseServers = append(leaseServers, server)
    }

    time.Sleep(3 * time.Second)

    // 4. shut down the masterCount servers
    for _, server := range masterCountServers {
        server.TearDownFn()
    }

    // 5. verify only leaseEndpoint servers are left
    if err := wait.PollImmediate(3*time.Second, 2*time.Minute, func() (bool, error) {
        client, err := kubernetes.NewForConfig(leaseServers[0].ClientConfig)
        if err != nil {
            t.Logf("create client error: %v", err)
            return false, nil
        }
        endpoints, err := client.CoreV1().Endpoints("default").Get("kubernetes", metav1.GetOptions{})
        if err != nil {
            t.Logf("error fetching endpoints: %v", err)
            return false, nil
        }
        return verifyEndpointsWithIPs(leaseServers, getEndpointIPs(endpoints)), nil
    }); err != nil {
        t.Fatalf("did not find only lease endpoints: %v", err)
    }
}

func TestReconcilerMasterLeaseCombined(t *testing.T) {
    testReconcilersMasterLease(t, 1, 3)
}

func TestReconcilerMasterLeaseMultiMoreMasters(t *testing.T) {
    testReconcilersMasterLease(t, 3, 2)
}

func TestReconcilerMasterLeaseMultiCombined(t *testing.T) {
    testReconcilersMasterLease(t, 3, 3)
}
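Editorial note: verifyEndpointsWithIPs compares the two slices with reflect.DeepEqual, so it is order-sensitive and passes only because addresses happen to be registered in creation order. An order-insensitive comparison is a small sketch, assuming sort and reflect are imported (illustrative, not the vendored code):

// sameIPSet is a hypothetical order-insensitive variant: it sorts copies
// before comparing, so the check no longer depends on registration order.
func sameIPSet(a, b []string) bool {
    if len(a) != len(b) {
        return false
    }
    x := append([]string(nil), a...)
    y := append([]string(nil), b...)
    sort.Strings(x)
    sort.Strings(y)
    return reflect.DeepEqual(x, y)
}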
1
vendor/k8s.io/kubernetes/test/integration/master/secrets_transformation_test.go
generated
vendored
@ -124,6 +124,7 @@ func runBenchmark(b *testing.B, transformerConfig string) {
    b.StartTimer()
    test.benchmark(b)
    b.StopTimer()
    test.printMetrics()
}

func unSealWithGCMTransformer(cipherText []byte, ctx value.Context,
116
vendor/k8s.io/kubernetes/test/integration/master/synthetic_master_test.go
generated
vendored
@ -24,6 +24,7 @@ import (
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@ -31,7 +32,9 @@ import (
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@ -41,6 +44,7 @@ import (
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
|
||||
"k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest"
|
||||
clientsetv1 "k8s.io/client-go/kubernetes"
|
||||
clienttypedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
@ -225,6 +229,116 @@ func TestStatus(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func constructBody(val string, size int, field string, t *testing.T) *appsv1.Deployment {
|
||||
var replicas int32 = 1
|
||||
deploymentObject := &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "test",
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Strategy: appsv1.DeploymentStrategy{
|
||||
Type: appsv1.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
Image: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
switch field {
|
||||
case "labels":
|
||||
labelsMap := map[string]string{}
|
||||
for i := 0; i < size; i++ {
|
||||
key := val + strconv.Itoa(i)
|
||||
labelsMap[key] = val
|
||||
}
|
||||
deploymentObject.ObjectMeta.Labels = labelsMap
|
||||
case "annotations":
|
||||
annotationsMap := map[string]string{}
|
||||
for i := 0; i < size; i++ {
|
||||
key := val + strconv.Itoa(i)
|
||||
annotationsMap[key] = val
|
||||
}
|
||||
deploymentObject.ObjectMeta.Annotations = annotationsMap
|
||||
case "finalizers":
|
||||
finalizerString := []string{}
|
||||
for i := 0; i < size; i++ {
|
||||
finalizerString = append(finalizerString, val)
|
||||
}
|
||||
deploymentObject.ObjectMeta.Finalizers = finalizerString
|
||||
default:
|
||||
t.Fatalf("Unexpected field: %s used for making large deployment object value", field)
|
||||
}
|
||||
|
||||
return deploymentObject
|
||||
}
|
||||
|
||||
func TestObjectSizeResponses(t *testing.T) {
|
||||
_, s, closeFn := framework.RunAMaster(nil)
|
||||
defer closeFn()
|
||||
|
||||
client := clientsetv1.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[api.GroupName].GroupVersion()}})
|
||||
|
||||
const DeploymentMegabyteSize = 100000
|
||||
const DeploymentTwoMegabyteSize = 1000000
|
||||
|
||||
expectedMsgFor1MB := `etcdserver: request is too large`
|
||||
expectedMsgFor2MB := `rpc error: code = ResourceExhausted desc = grpc: trying to send message larger than max`
|
||||
expectedMsgForLargeAnnotation := `metadata.annotations: Too long: must have at most 262144 characters`
|
||||
|
||||
deployment1 := constructBody("a", DeploymentMegabyteSize, "labels", t) // >1 MB file
|
||||
deployment2 := constructBody("a", DeploymentTwoMegabyteSize, "labels", t) // >2 MB file
|
||||
|
||||
deployment3 := constructBody("a", DeploymentMegabyteSize, "annotations", t)
|
||||
|
||||
deployment4 := constructBody("sample/sample", DeploymentMegabyteSize, "finalizers", t) // >1 MB file
|
||||
deployment5 := constructBody("sample/sample", DeploymentTwoMegabyteSize, "finalizers", t) // >2 MB file
|
||||
|
||||
requests := []struct {
|
||||
size string
|
||||
deploymentObject *appsv1.Deployment
|
||||
expectedMessage string
|
||||
}{
|
||||
{"1 MB", deployment1, expectedMsgFor1MB},
|
||||
{"2 MB", deployment2, expectedMsgFor2MB},
|
||||
{"1 MB", deployment3, expectedMsgForLargeAnnotation},
|
||||
{"1 MB", deployment4, expectedMsgFor1MB},
|
||||
{"2 MB", deployment5, expectedMsgFor2MB},
|
||||
}
|
||||
|
||||
for _, r := range requests {
|
||||
t.Run(r.size, func(t *testing.T) {
|
||||
_, err := client.AppsV1().Deployments(metav1.NamespaceDefault).Create(r.deploymentObject)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), r.expectedMessage) {
|
||||
t.Errorf("got: %s;want: %s", err.Error(), r.expectedMessage)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchSucceedsWithoutArgs(t *testing.T) {
|
||||
_, s, closeFn := framework.RunAMaster(nil)
|
||||
defer closeFn()
|
||||
@ -666,7 +780,7 @@ func TestUpdateNodeObjects(t *testing.T) {
|
||||
go func(lister int) {
|
||||
w, err := c.Nodes().Watch(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[watch:%d] error: %v", k, err)
|
||||
fmt.Printf("[watch:%d] error: %v", lister, err)
|
||||
return
|
||||
}
|
||||
i := 0
|
||||
|
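Editorial note: the k-to-lister fix above is the classic loop-variable capture pitfall: a goroutine that reads the loop variable directly sees whatever value it holds when the goroutine actually runs, so the value must be passed as an argument (as the closure's lister parameter already does). A standalone illustration of the idiom:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    for k := 0; k < 3; k++ {
        wg.Add(1)
        // Passing k as an argument snapshots its value per iteration;
        // referencing k directly inside the closure would race with the
        // loop and (before Go 1.22 changed loop-variable scoping)
        // typically observe the final value.
        go func(lister int) {
            defer wg.Done()
            fmt.Println("watcher", lister)
        }(k)
    }
    wg.Wait()
}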
32
vendor/k8s.io/kubernetes/test/integration/master/transformation_testcase.go
generated
vendored
@ -29,6 +29,7 @@ import (

    "github.com/coreos/etcd/clientv3"
    "github.com/ghodss/yaml"
    "github.com/prometheus/client_golang/prometheus"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -47,6 +48,7 @@ const (
    encryptionConfigFileName = "encryption.conf"
    testNamespace            = "secret-encryption-test"
    testSecret               = "test-secret"
    metricsPrefix            = "apiserver_storage_"
)

type unSealSecret func(cipherText []byte, ctx value.Context, config encryptionconfig.ProviderConfig) ([]byte, error)
@ -76,7 +78,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
        }
    }

    if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, e.getEncryptionOptions(), e.storageConfig); err != nil {
    if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, nil, e.getEncryptionOptions(), e.storageConfig); err != nil {
        return nil, fmt.Errorf("failed to start KubeAPI server: %v", err)
    }

@ -237,3 +239,31 @@ func (e *transformTest) readRawRecordFromETCD(path string) (*clientv3.GetRespons

    return response, nil
}

func (e *transformTest) printMetrics() error {
    e.logger.Logf("Transformation Metrics:")
    metrics, err := prometheus.DefaultGatherer.Gather()
    if err != nil {
        return fmt.Errorf("failed to gather metrics: %s", err)
    }

    for _, mf := range metrics {
        if strings.HasPrefix(*mf.Name, metricsPrefix) {
            e.logger.Logf("%s", *mf.Name)
            for _, metric := range mf.GetMetric() {
                e.logger.Logf("%v", metric)
            }
        }
    }

    return nil
}

func contains(s []string, e string) bool {
    for _, a := range s {
        if a == e {
            return true
        }
    }
    return false
}
3
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
@ -35,13 +35,12 @@ go_test(
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/prometheus/client_model/go:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
5
vendor/k8s.io/kubernetes/test/integration/metrics/metrics_test.go
generated
vendored
@ -24,11 +24,10 @@ import (
"runtime"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
|
||||
"github.com/golang/glog"
|
||||
@ -111,7 +110,7 @@ func TestApiserverMetrics(t *testing.T) {
|
||||
|
||||
// Make a request to the apiserver to ensure there's at least one data point
|
||||
// for the metrics we're expecting -- otherwise, they won't be exported.
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
if _, err := client.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil {
|
||||
t.Fatalf("unexpected error getting pods: %v", err)
|
||||
}
|
||||
|
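Editorial note: the recurring substitution through the rest of these tests — a literal schema.GroupVersion in place of testapi group lookups — decouples client construction from the legacy testapi package. The shape of the resulting config, as a hypothetical standalone helper (the host value is a placeholder, not from the vendored code):

// newCoreV1Client mirrors the replacement pattern used in these tests.
import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
)

func newCoreV1Client(host string) *clientset.Clientset {
    return clientset.NewForConfigOrDie(&restclient.Config{
        Host: host, // e.g. a test server URL
        ContentConfig: restclient.ContentConfig{
            GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
        },
    })
}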
2
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
@ -14,11 +14,11 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
4
vendor/k8s.io/kubernetes/test/integration/objectmeta/objectmeta_test.go
generated
vendored
@ -23,9 +23,9 @@ import (

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/test/integration/framework"
)

@ -34,7 +34,7 @@ func TestIgnoreClusterName(t *testing.T) {
    _, s, closeFn := framework.RunAMaster(config)
    defer closeFn()

    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    ns := v1.Namespace{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-namespace",
3
vendor/k8s.io/kubernetes/test/integration/openshift/openshift_test.go
generated
vendored
@ -31,8 +31,7 @@ func TestMasterExportsSymbols(t *testing.T) {
            EnableMetrics: true,
        },
        ExtraConfig: master.ExtraConfig{
            EnableCoreControllers: false,
            EnableLogsSupport:     false,
            EnableLogsSupport: false,
        },
    }
    _ = &master.Master{
2
vendor/k8s.io/kubernetes/test/integration/pods/BUILD
generated
vendored
@ -14,11 +14,11 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
6
vendor/k8s.io/kubernetes/test/integration/pods/pods_test.go
generated
vendored
@ -22,9 +22,9 @@ import (

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/test/integration"
    "k8s.io/kubernetes/test/integration/framework"
)
@ -36,7 +36,7 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
    ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    var (
        iZero = int64(0)
@ -154,7 +154,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) {
    ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{

2
vendor/k8s.io/kubernetes/test/integration/quota/BUILD
generated
vendored
@ -14,7 +14,6 @@ go_test(
],
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/controller:go_default_library",
@ -30,6 +29,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",

10
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
@ -28,13 +28,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller"
@ -62,8 +62,8 @@ func TestQuota(t *testing.T) {
}))

admissionCh := make(chan struct{})
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
config := &resourcequotaapi.Configuration{}
admission, err := resourcequota.NewResourceQuota(config, 5, admissionCh)
if err != nil {
@ -250,8 +250,8 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
}))

admissionCh := make(chan struct{})
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

// stop creation of a pod resource unless there is a quota
config := &resourcequotaapi.Configuration{

4
vendor/k8s.io/kubernetes/test/integration/replicaset/BUILD
generated
vendored
@ -19,8 +19,8 @@ go_test(
"//pkg/util/slice:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@ -28,8 +28,8 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",

149
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
@ -24,8 +24,8 @@ import (
"testing"
"time"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -33,8 +33,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
typedv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
@ -54,18 +54,18 @@ func labelMap() map[string]string {
return map[string]string{"foo": "bar"}
}

func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
func newRS(name, namespace string, replicas int) *apps.ReplicaSet {
replicasCopy := int32(replicas)
return &v1beta1.ReplicaSet{
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "extensions/v1beta1",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1beta1.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labelMap(),
},
@ -151,7 +151,7 @@ func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), resyncPeriod)

rm := replicaset.NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
@ -196,11 +196,11 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
}
}

func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.ReplicaSet, pods []*v1.Pod) ([]*v1beta1.ReplicaSet, []*v1.Pod) {
var createdRSs []*v1beta1.ReplicaSet
func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*apps.ReplicaSet, pods []*v1.Pod) ([]*apps.ReplicaSet, []*v1.Pod) {
var createdRSs []*apps.ReplicaSet
var createdPods []*v1.Pod
for _, rs := range rss {
createdRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Create(rs)
createdRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Create(rs)
if err != nil {
t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
}
@ -218,16 +218,16 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R
}

// Verify .Status.Replicas is equal to .Spec.Replicas
func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet) {
func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaSet) {
if err := testutil.WaitRSStable(t, clientSet, rs, interval, timeout); err != nil {
t.Fatal(err)
}
}

// Update .Spec.Replicas to replicas and verify .Status.Replicas is changed accordingly
func scaleRS(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) {
rsClient := c.ExtensionsV1beta1().ReplicaSets(rs.Namespace)
rs = updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
func scaleRS(t *testing.T, c clientset.Interface, rs *apps.ReplicaSet, replicas int32) {
rsClient := c.AppsV1().ReplicaSets(rs.Namespace)
rs = updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) {
*rs.Spec.Replicas = replicas
})
waitRSStable(t, c, rs)
@ -273,8 +273,8 @@ func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]s
return pods
}

func updateRS(t *testing.T, rsClient typedv1beta1.ReplicaSetInterface, rsName string, updateFunc func(*v1beta1.ReplicaSet)) *v1beta1.ReplicaSet {
var rs *v1beta1.ReplicaSet
func updateRS(t *testing.T, rsClient appsclient.ReplicaSetInterface, rsName string, updateFunc func(*apps.ReplicaSet)) *apps.ReplicaSet {
var rs *apps.ReplicaSet
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newRS, err := rsClient.Get(rsName, metav1.GetOptions{})
if err != nil {
@ -290,7 +290,7 @@ func updateRS(t *testing.T, rsClient typedv1beta1.ReplicaSetInterface, rsName st
}

// Verify ControllerRef of a RS pod that has incorrect attributes is automatically patched by the RS
func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, ownerReference *metav1.OwnerReference, rs *v1beta1.ReplicaSet, expectedOwnerReferenceNum int) {
func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, ownerReference *metav1.OwnerReference, rs *apps.ReplicaSet, expectedOwnerReferenceNum int) {
ns := rs.Namespace
podClient := c.CoreV1().Pods(ns)
updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
@ -359,7 +359,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1
}
}

func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) {
func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *apps.ReplicaSet, replicas int32) {
ns := rs.Namespace
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
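updateRS above now takes the apps/v1 typed client and retries on write conflicts. A sketch of that read-modify-write loop in isolation, reusing the import aliases from replicaset_test.go (apps, appsclient, metav1, retry); the function name is illustrative:

// updateReplicaSet re-reads the object on every attempt so the mutation is
// applied to the latest ResourceVersion; RetryOnConflict re-runs the closure
// only when the server rejects the update with a conflict error.
func updateReplicaSet(rsClient appsclient.ReplicaSetInterface, name string, mutate func(*apps.ReplicaSet)) (*apps.ReplicaSet, error) {
	var updated *apps.ReplicaSet
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		current, err := rsClient.Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		mutate(current)
		updated, err = rsClient.Update(current)
		return err
	})
	return updated, err
}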
@ -401,48 +401,48 @@ func TestAdoption(t *testing.T) {
boolPtr := func(b bool) *bool { return &b }
testCases := []struct {
name string
existingOwnerReferences func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference
expectedOwnerReferences func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference
existingOwnerReferences func(rs *apps.ReplicaSet) []metav1.OwnerReference
expectedOwnerReferences func(rs *apps.ReplicaSet) []metav1.OwnerReference
}{
{
"pod refers rs as an owner, not a controller",
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"}}
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet"}}
},
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
},
},
{
"pod doesn't have owner references",
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{}
},
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
},
},
{
"pod refers rs as a controller",
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)}}
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet", Controller: boolPtr(true)}}
},
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)}}
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet", Controller: boolPtr(true)}}
},
},
{
"pod refers other rs as the controller, refers the rs as an owner",
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{
{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)},
{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
{UID: "1", Name: "anotherRS", APIVersion: "apps/v1", Kind: "ReplicaSet", Controller: boolPtr(true)},
{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet"},
}
},
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
func(rs *apps.ReplicaSet) []metav1.OwnerReference {
return []metav1.OwnerReference{
{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)},
{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
{UID: "1", Name: "anotherRS", APIVersion: "apps/v1", Kind: "ReplicaSet", Controller: boolPtr(true)},
{UID: rs.UID, Name: rs.Name, APIVersion: "apps/v1", Kind: "ReplicaSet"},
}
},
},
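Each expected ControllerRef in the table above now carries APIVersion apps/v1. A small sketch of the reference the controller is expected to stamp on an adopted pod, with Controller and BlockOwnerDeletion both set (the helper name is illustrative):

// controllerRefFor builds the OwnerReference an apps/v1 ReplicaSet stamps on
// pods it adopts: it marks itself as the managing controller and blocks
// owner deletion until the dependent is finalized.
func controllerRefFor(rs *apps.ReplicaSet) metav1.OwnerReference {
	isTrue := true
	return metav1.OwnerReference{
		UID:                rs.UID,
		Name:               rs.Name,
		APIVersion:         "apps/v1",
		Kind:               "ReplicaSet",
		Controller:         &isTrue,
		BlockOwnerDeletion: &isTrue,
	}
}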
@ -454,7 +454,7 @@ func TestAdoption(t *testing.T) {
ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name)
rsClient := clientSet.AppsV1().ReplicaSets(ns.Name)
podClient := clientSet.CoreV1().Pods(ns.Name)
const rsName = "rs"
rs, err := rsClient.Create(newRS(rsName, ns.Name, 1))
@ -496,13 +496,17 @@ func TestRSSelectorImmutability(t *testing.T) {
ns := framework.CreateTestingNamespace("rs-selector-immutability", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 0)
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
createRSsPods(t, clientSet, []*apps.ReplicaSet{rs}, []*v1.Pod{})

// test to ensure extensions/v1beta1 selector is mutable
newSelectorLabels := map[string]string{"changed_name_extensions_v1beta1": "changed_test_extensions_v1beta1"}
rs.Spec.Selector.MatchLabels = newSelectorLabels
rs.Spec.Template.Labels = newSelectorLabels
replicaset, err := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name).Update(rs)
rsExt, err := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get extensions/v1beta1 replicaset %s: %v", rs.Name, err)
}
rsExt.Spec.Selector.MatchLabels = newSelectorLabels
rsExt.Spec.Template.Labels = newSelectorLabels
replicaset, err := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name).Update(rsExt)
if err != nil {
t.Fatalf("failed to update extensions/v1beta1 replicaset %s: %v", replicaset.Name, err)
}
@ -510,17 +514,17 @@ func TestRSSelectorImmutability(t *testing.T) {
t.Errorf("selector should be changed for extensions/v1beta1, expected: %v, got: %v", newSelectorLabels, replicaset.Spec.Selector.MatchLabels)
}

// test to ensure apps/v1beta2 selector is immutable
rsV1beta2, err := clientSet.AppsV1beta2().ReplicaSets(ns.Name).Get(replicaset.Name, metav1.GetOptions{})
// test to ensure apps/v1 selector is immutable
rsV1, err := clientSet.AppsV1().ReplicaSets(ns.Name).Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1beta2 replicaset %s: %v", replicaset.Name, err)
t.Fatalf("failed to get apps/v1 replicaset %s: %v", rs.Name, err)
}
newSelectorLabels = map[string]string{"changed_name_apps_v1beta2": "changed_test_apps_v1beta2"}
rsV1beta2.Spec.Selector.MatchLabels = newSelectorLabels
rsV1beta2.Spec.Template.Labels = newSelectorLabels
_, err = clientSet.AppsV1beta2().ReplicaSets(ns.Name).Update(rsV1beta2)
newSelectorLabels = map[string]string{"changed_name_apps_v1": "changed_test_apps_v1"}
rsV1.Spec.Selector.MatchLabels = newSelectorLabels
rsV1.Spec.Template.Labels = newSelectorLabels
_, err = clientSet.AppsV1().ReplicaSets(ns.Name).Update(rsV1)
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1beta2 replicaset %s", rsV1beta2.Name)
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 replicaset %s", rsV1.Name)
}
expectedErrType := "Invalid value"
expectedErrDetail := "field is immutable"
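The rewritten test asserts the selector stays mutable through extensions/v1beta1 but is rejected through apps/v1. A sketch of an equivalent assertion using the typed check from k8s.io/apimachinery/pkg/api/errors (already imported above) instead of the string matching the test itself performs; this is an alternative, not the test's own code:

// Mutating .Spec.Selector on an apps/v1 ReplicaSet must fail validation.
rsV1.Spec.Selector.MatchLabels = map[string]string{"new": "selector"}
rsV1.Spec.Template.Labels = map[string]string{"new": "selector"}
if _, err := clientSet.AppsV1().ReplicaSets(ns.Name).Update(rsV1); err == nil {
	t.Fatalf("expected a validation error for the immutable selector")
} else if !errors.IsInvalid(err) {
	// IsInvalid reports whether the server rejected the object as Invalid.
	t.Fatalf("expected an Invalid error, got: %v", err)
}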
@ -538,7 +542,7 @@ func TestSpecReplicasChange(t *testing.T) {
defer close(stopCh)

rs := newRS("rs", ns.Name, 2)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -549,9 +553,9 @@ func TestSpecReplicasChange(t *testing.T) {

// Add a template annotation change to test RS's status does update
// without .Spec.Replicas change
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name)
rsClient := c.AppsV1().ReplicaSets(ns.Name)
var oldGeneration int64
newRS := updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
newRS := updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) {
oldGeneration = rs.Generation
rs.Spec.Template.Annotations = map[string]string{"test": "annotation"}
})
@ -580,7 +584,7 @@ func TestDeletingAndFailedPods(t *testing.T) {
defer close(stopCh)

rs := newRS("rs", ns.Name, 2)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -648,7 +652,7 @@ func TestOverlappingRSs(t *testing.T) {
for i := 0; i < 2; i++ {
// One RS has 1 replica, and another has 2 replicas
rs := newRS(fmt.Sprintf("rs-%d", i+1), ns.Name, i+1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
waitRSStable(t, c, rss[0])
}

@ -680,7 +684,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
defer close(stopCh)

rs := newRS("rs", ns.Name, 1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -757,7 +761,7 @@ func TestGeneralPodAdoption(t *testing.T) {
defer close(stopCh)

rs := newRS("rs", ns.Name, 1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -772,11 +776,11 @@ func TestGeneralPodAdoption(t *testing.T) {
// When the only OwnerReference of the pod points to another type of API object such as statefulset
// with Controller=false, the RS should add a second OwnerReference (ControllerRef) pointing to itself
// with Controller=true
ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: rs.Name, Controller: &falseVar}
ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1", Kind: "StatefulSet", Name: rs.Name, Controller: &falseVar}
testPodControllerRefPatch(t, c, pod, &ownerReference, rs, 2)

// When the only OwnerReference of the pod points to the RS, but Controller=false
ownerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &falseVar}
ownerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: "apps/v1", Kind: "ReplicaSet", Name: rs.Name, Controller: &falseVar}
testPodControllerRefPatch(t, c, pod, &ownerReference, rs, 1)
}

@ -790,7 +794,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) {

rs := newRS("rs", ns.Name, 3)
rs.Spec.MinReadySeconds = 3600
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -841,7 +845,7 @@ func TestRSScaleSubresource(t *testing.T) {
defer close(stopCh)

rs := newRS("rs", ns.Name, 1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -865,7 +869,7 @@ func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
pod.Labels = labelMap()
podList = append(podList, pod)
}
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, podList)
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, podList)
rs = rss[0]
stopCh := runControllerAndInformers(t, rm, informers, 3)
defer close(stopCh)
@ -893,13 +897,13 @@ func TestFullyLabeledReplicas(t *testing.T) {

extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
rs := newRS("rs", ns.Name, 2)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// Change RS's template labels to have extra labels, but not its selector
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name)
updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
rsClient := c.AppsV1().ReplicaSets(ns.Name)
updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) {
rs.Spec.Template.Labels = extraLabelMap
})

@ -937,7 +941,7 @@ func TestReplicaSetsExtensionsV1beta1DefaultGCPolicy(t *testing.T) {
rs := newRS("rs", ns.Name, 2)
fakeFinalizer := "kube.io/dummy-finalizer"
rs.Finalizers = []string{fakeFinalizer}
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -948,13 +952,14 @@ func TestReplicaSetsExtensionsV1beta1DefaultGCPolicy(t *testing.T) {
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
}

rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name)
err := rsClient.Delete(rs.Name, nil)
// Delete via the extensions/v1beta1 endpoint.
err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Delete(rs.Name, nil)
if err != nil {
t.Fatalf("Failed to delete rs: %v", err)
}

// Verify orphan finalizer has been added
rsClient := c.AppsV1().ReplicaSets(ns.Name)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
@ -965,7 +970,7 @@ func TestReplicaSetsExtensionsV1beta1DefaultGCPolicy(t *testing.T) {
t.Fatalf("Failed to verify orphan finalizer is added: %v", err)
}

updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) {
var finalizers []string
// remove fakeFinalizer
for _, finalizer := range rs.Finalizers {
@ -982,7 +987,7 @@ func TestReplicaSetsExtensionsV1beta1DefaultGCPolicy(t *testing.T) {
func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-default-gc-extensions", s, t)
ns := framework.CreateTestingNamespace("test-default-gc-v1", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)
@ -990,7 +995,7 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
rs := newRS("rs", ns.Name, 2)
fakeFinalizer := "kube.io/dummy-finalizer"
rs.Finalizers = []string{fakeFinalizer}
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

@ -1024,7 +1029,7 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
t.Fatalf("Failed to verify the finalizer: %v", err)
}

updateRS(t, c.ExtensionsV1beta1().ReplicaSets(ns.Name), rs.Name, func(rs *v1beta1.ReplicaSet) {
updateRS(t, c.AppsV1().ReplicaSets(ns.Name), rs.Name, func(rs *apps.ReplicaSet) {
var finalizers []string
// remove fakeFinalizer
for _, finalizer := range rs.Finalizers {
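The two GC tests above hinge on different server-side defaults: deleting a ReplicaSet through the extensions/v1beta1 endpoint orphans its pods unless told otherwise, while apps/v1 deletes dependents in the background. A sketch of pinning the propagation policy explicitly so the behavior does not depend on the endpoint:

// Make the delete propagation explicit instead of relying on per-endpoint
// defaults; both constants come from metav1.
orphan := metav1.DeletePropagationOrphan
background := metav1.DeletePropagationBackground

// Leave the pods behind (the old extensions/v1beta1 default).
err := c.AppsV1().ReplicaSets(ns.Name).Delete(rs.Name, &metav1.DeleteOptions{PropagationPolicy: &orphan})

// Or delete the ReplicaSet and let the garbage collector remove its pods
// asynchronously (the apps/v1 default).
err = c.AppsV1().ReplicaSets(ns.Name).Delete(rs.Name, &metav1.DeleteOptions{PropagationPolicy: &background})
_ = err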

2
vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go
generated
vendored
@ -215,7 +215,7 @@ var (
)

func setup(t *testing.T) (client kubernetes.Interface, tearDown func()) {
result := apitesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
result := apitesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())

// TODO: Disable logging here until we resolve teardown issues which result in
// massive log spam. Another path forward would be to refactor
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
21
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
@ -23,8 +23,8 @@ go_test(
|
||||
tags = ["integration"],
|
||||
deps = [
|
||||
"//cmd/kube-scheduler/app:go_default_library",
|
||||
"//cmd/kube-scheduler/app/config:go_default_library",
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
|
||||
@ -36,13 +36,13 @@ go_test(
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithmprovider:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/factory:go_default_library",
|
||||
"//pkg/scheduler/schedulercache:go_default_library",
|
||||
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
|
||||
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
@ -51,8 +51,11 @@ go_test(
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
@ -86,17 +89,29 @@ go_library(
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/disruption:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/scheduler:go_default_library",
|
||||
"//pkg/scheduler/algorithmprovider:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/factory:go_default_library",
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
|

63
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go
generated
vendored
@ -31,19 +31,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
@ -130,7 +121,7 @@ func (e *Extender) filterUsingNodeCache(args *schedulerapi.ExtenderArgs) (*sched
for _, nodeName := range *args.NodeNames {
fits := true
for _, predicate := range e.predicates {
fit, err := predicate(&args.Pod,
fit, err := predicate(args.Pod,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
if err != nil {
return &schedulerapi.ExtenderFilterResult{
@ -169,7 +160,7 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
for _, node := range args.Nodes.Items {
fits := true
for _, predicate := range e.predicates {
fit, err := predicate(&args.Pod, &node)
fit, err := predicate(args.Pod, &node)
if err != nil {
return &schedulerapi.ExtenderFilterResult{
Nodes: &v1.NodeList{},
@ -217,7 +208,7 @@ func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.Ho
continue
}
priorityFunc := prioritizer.function
prioritizedList, err := priorityFunc(&args.Pod, nodes)
prioritizedList, err := priorityFunc(args.Pod, nodes)
if err != nil {
return &schedulerapi.HostPriorityList{}, err
}
@ -288,13 +279,8 @@ func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostP
}

func TestSchedulerExtender(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()

ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
context := initTestMaster(t, "scheduler-extender", nil)
clientSet := context.clientSet

extender1 := &Extender{
name: "extender1",
@ -361,39 +347,12 @@ func TestSchedulerExtender(t *testing.T) {
},
},
}
policy.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
policy.APIVersion = "v1"

informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.CoreV1().RESTClient()).Events("")})
scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
informerFactory.Start(schedulerConfig.StopEverything)
scheduler.Run()
context = initTestScheduler(t, context, nil, false, &policy)
defer cleanupTest(t, context)

defer close(schedulerConfig.StopEverything)

DoTestPodScheduling(ns, t, clientSet)
DoTestPodScheduling(context.ns, t, clientSet)
}

func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
@ -430,7 +389,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
Containers: []v1.Container{
{
Name: "container",
Image: e2e.GetPauseImageName(cs),
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
extendedResourceName: *resource.NewQuantity(1, resource.DecimalSI),
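The extender changes above follow an API change in this release: schedulerapi.ExtenderArgs now carries the pod as *v1.Pod, so the test's predicates and prioritizers receive args.Pod directly rather than &args.Pod. A sketch of a predicate written against the pointer form (the predicate type and helper names are illustrative, not from the test):

// fitPredicate mirrors the shape of the test extender's predicates after the
// change: the pod arrives as a pointer, so call sites need no address-of.
type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)

// onNamedNode fits a pod only on the node with the given name.
func onNamedNode(name string) fitPredicate {
	return func(pod *v1.Pod, node *v1.Node) (bool, error) {
		return node.Name == name, nil
	}
}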

112
vendor/k8s.io/kubernetes/test/integration/scheduler/predicates_test.go
generated
vendored
@ -24,8 +24,8 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// This file tests the scheduler predicates functionality.
@ -75,7 +75,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -108,7 +108,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -140,7 +140,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -167,7 +167,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name,
},
},
@ -183,7 +183,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -205,7 +205,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name},
ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
@ -221,7 +221,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -244,7 +244,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name},
ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
@ -260,7 +260,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -282,7 +282,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},
@ -297,7 +297,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -336,7 +336,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},
@ -351,7 +351,7 @@ func TestInterPodAffinity(t *testing.T) {
Name: "fakename",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -390,7 +390,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},
@ -405,7 +405,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -443,7 +443,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},
@ -458,7 +458,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -498,7 +498,7 @@ func TestInterPodAffinity(t *testing.T) {
pods: []*v1.Pod{
{
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name,
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
@ -535,7 +535,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -573,7 +573,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},
@ -588,7 +588,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -629,7 +629,7 @@ func TestInterPodAffinity(t *testing.T) {
{
Spec: v1.PodSpec{
NodeName: nodes[0].Name,
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -665,7 +665,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -687,7 +687,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},
@ -701,12 +701,12 @@ func TestInterPodAffinity(t *testing.T) {
Name: "fakename",
Labels: podLabel,
},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}}},
},
pods: []*v1.Pod{
{
Spec: v1.PodSpec{NodeName: nodes[0].Name,
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -741,12 +741,12 @@ func TestInterPodAffinity(t *testing.T) {
Name: "fake-name",
Labels: podLabel,
},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}}},
},
pods: []*v1.Pod{
{
Spec: v1.PodSpec{NodeName: nodes[0].Name,
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@ -779,7 +779,7 @@ func TestInterPodAffinity(t *testing.T) {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "fake-name2"},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeSelector: map[string]string{"region": "r1"},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
@ -803,7 +803,7 @@ func TestInterPodAffinity(t *testing.T) {
},
pods: []*v1.Pod{
{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{Name: "fakename", Labels: map[string]string{"foo": "abc"}}}},
},
fits: false,
@ -870,3 +870,53 @@ func TestInterPodAffinity(t *testing.T) {
}
}
}

// TestNodePIDPressure verifies that scheduler's CheckNodePIDPressurePredicate predicate
// function works correctly.
func TestNodePIDPressure(t *testing.T) {
context := initTest(t, "node-pid-pressure")
defer cleanupTest(t, context)
// Add a node.
node, err := createNode(context.clientSet, "testnode", nil)
if err != nil {
t.Fatalf("Cannot create node: %v", err)
}

cs := context.clientSet

// Add a PID pressure condition to the node.
node.Status.Conditions = []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
}

// Update node condition.
err = updateNodeStatus(context.clientSet, node)
if err != nil {
t.Fatalf("Cannot update node: %v", err)
}

// Create a test pod.
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pidpressure-fake-name"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container", Image: imageutils.GetPauseImageName()},
},
},
}

testPod, err = cs.CoreV1().Pods(context.ns.Name).Create(testPod)
if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err)
}

err = waitForPodUnschedulable(cs, testPod)
if err != nil {
t.Errorf("Test Failed: error: %v, while waiting for pod to become unschedulable", err)
}

cleanupPods(cs, t, []*v1.Pod{testPod})
}
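TestNodePIDPressure leans on waitForPodUnschedulable, a helper defined elsewhere in this package. A sketch of what such a poll can plausibly look like, assuming the scheduler reports the condition with reason Unschedulable (the function name and timeout here are illustrative):

// podIsUnschedulable polls until the pod carries a PodScheduled condition
// with status False and reason Unschedulable, i.e. the scheduler has seen
// the pod and refused to place it.
func podIsUnschedulable(cs clientset.Interface, ns, name string) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse &&
				cond.Reason == v1.PodReasonUnschedulable {
				return true, nil
			}
		}
		return false, nil
	})
}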

170
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go
generated
vendored
@ -27,7 +27,7 @@ import (
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -41,8 +41,8 @@ import (

var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)

func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
@ -57,6 +57,10 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
return nil
}

func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
return waitForNominatedNodeNameWithTimeout(cs, pod, wait.ForeverTestTimeout)
}

// TestPreemption tests a few preemption scenarios.
func TestPreemption(t *testing.T) {
// Enable PodPriority feature gate.
@ -285,6 +289,88 @@ func TestPreemption(t *testing.T) {
}
}

// TestDisablePreemption tests that disabling pod preemption in the scheduler works as expected.
func TestDisablePreemption(t *testing.T) {
// Enable PodPriority feature gate.
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
// Initialize scheduler, and disable preemption.
context := initTestDisablePreemption(t, "disable-preemption")
defer cleanupTest(t, context)
cs := context.clientSet

tests := []struct {
description string
existingPods []*v1.Pod
pod *v1.Pod
}{
{
description: "pod preemption will not happen",
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "victim-pod",
Namespace: context.ns.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
},
}),
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
Namespace: context.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
},
}),
},
}

// Create a node with some resources and a label.
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}

for _, test := range tests {
pods := make([]*v1.Pod, len(test.existingPods))
// Create and run existingPods.
for i, p := range test.existingPods {
pods[i], err = runPausePod(cs, p)
if err != nil {
t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
}
}
// Create the "pod".
preemptor, err := createPausePod(cs, test.pod)
if err != nil {
t.Errorf("Error while creating high priority pod: %v", err)
}
// Ensure the preemptor stays unschedulable.
if err := waitForPodUnschedulable(cs, preemptor); err != nil {
t.Errorf("Test [%v]: Preemptor %v should not become scheduled",
test.description, preemptor.Name)
}

// Ensure the preemptor is not nominated.
if err := waitForNominatedNodeNameWithTimeout(cs, preemptor, 5*time.Second); err == nil {
t.Errorf("Test [%v]: Preemptor %v should not be nominated",
test.description, preemptor.Name)
}

// Cleanup
pods = append(pods, preemptor)
cleanupPods(cs, t, pods)
}
}
func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -419,7 +505,11 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "preemption")
defer cleanupTest(t, context)

cs := context.clientSet

defer cleanupPodsInNamespace(cs, t, context.ns.Name)

// Create a node with some resources.
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
@@ -496,7 +586,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
}

func mkMinAvailablePDB(name, namespace string, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
func mkMinAvailablePDB(name, namespace string, uid types.UID, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
intMinAvailable := intstr.FromInt(minAvailable)
return &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
@@ -510,6 +600,18 @@ func mkMinAvailablePDB(name, namespace string, minAvailable int, matchLabels map
}
}

func addPodConditionReady(pod *v1.Pod) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
}
}

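addPodConditionReady exists because the disruption controller counts only Ready pods toward a PDB's CurrentHealthy. As the TestPDBInPreemption hunk below shows, each pause pod's status is therefore patched after it starts running; the sequence, sketched with the names used in this file:

	// Sketch: mark a running pause pod Ready, then write the status
	// subresource so the disruption controller counts it as healthy.
	addPodConditionReady(pod)
	if _, err := cs.CoreV1().Pods(ns).UpdateStatus(pod); err != nil {
		t.Fatal(err)
	}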
// TestPDBInPreemption tests PodDisruptionBudget support in preemption.
func TestPDBInPreemption(t *testing.T) {
// Enable PodPriority feature gate.
@@ -519,6 +621,8 @@ func TestPDBInPreemption(t *testing.T) {
defer cleanupTest(t, context)
cs := context.clientSet

initDisruptionController(context)

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
@@ -538,6 +642,7 @@ func TestPDBInPreemption(t *testing.T) {
description string
nodes []*nodeConfig
pdbs []*policy.PodDisruptionBudget
pdbPodNum []int32
existingPods []*v1.Pod
pod *v1.Pod
preemptedPodIndexes map[int]struct{}
@@ -546,8 +651,9 @@ func TestPDBInPreemption(t *testing.T) {
description: "A non-PDB violating pod is preempted despite its higher priority",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo": "bar"}),
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
},
pdbPodNum: []int32{2},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "low-pod1",
@@ -588,8 +694,9 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-2", res: defaultNodeRes},
},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo": "bar"}),
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
},
pdbPodNum: []int32{1},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "low-pod1",
@@ -626,9 +733,10 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-3", res: defaultNodeRes},
},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", context.ns.Name, 2, map[string]string{"foo2": "bar"}),
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", context.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
},
pdbPodNum: []int32{1, 5},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "low-pod1",
@@ -692,38 +800,22 @@ func TestPDBInPreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(400, resource.BinarySI)},
},
}),
preemptedPodIndexes: map[int]struct{}{0: {}, 1: {}},
// The third node is chosen because no PDB is violated for node-3 and its victims have lower priority than those on node-2.
preemptedPodIndexes: map[int]struct{}{4: {}, 5: {}, 6: {}},
},
}

for _, test := range tests {
t.Logf("================ Running test: %v\n", test.description)
for _, nodeConf := range test.nodes {
_, err := createNode(cs, nodeConf.name, nodeConf.res)
if err != nil {
t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
}
}
// Create PDBs.
for _, pdb := range test.pdbs {
_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
}
// Wait for PDBs to show up in the scheduler's cache.
if err := wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs) == len(test.pdbs), err
}); err != nil {
t.Fatalf("Not all PDBs were added to the cache: %v", err)
}

pods := make([]*v1.Pod, len(test.existingPods))
var err error
@@ -732,7 +824,29 @@ func TestPDBInPreemption(t *testing.T) {
if pods[i], err = runPausePod(cs, p); err != nil {
t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
}
// Add pod condition ready so that PDB is updated.
addPodConditionReady(p)
if _, err := context.clientSet.CoreV1().Pods(context.ns.Name).UpdateStatus(p); err != nil {
t.Fatal(err)
}
}
// Wait for Pods to be stable in scheduler cache.
if err := waitCachedPodsStable(context, test.existingPods); err != nil {
t.Fatalf("Not all pods are stable in the cache: %v", err)
}

// Create PDBs.
for _, pdb := range test.pdbs {
_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
}
// Wait for PDBs to show up in the scheduler's cache and become stable.
if err := waitCachedPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
t.Fatalf("Not all PDBs are stable in the cache: %v", err)
}

// Create the "pod".
preemptor, err := createPausePod(cs, test.pod)
if err != nil {
180
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
@@ -29,6 +29,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
@@ -41,20 +43,18 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/integration/framework"
)

const enableEquivalenceCache = true

type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface)

type nodeStateManager struct {
@@ -88,7 +88,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

@@ -135,6 +135,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"CheckNodeCondition", // mandatory predicate
"CheckNodeDiskPressure",
"CheckNodeMemoryPressure",
"CheckNodePIDPressure",
"CheckVolumeBinding",
"GeneralPredicates",
"MatchInterPodAffinity",
@@ -175,32 +176,34 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
Data: map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
}

policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
policyConfigMap.APIVersion = "v1"
clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

ss := &schedulerapp.SchedulerServer{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
Client: clientSet,
InformerFactory: informerFactory,
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
PodInformer: factory.NewPodInformer(clientSet, 0),
EventClient: clientSet.CoreV1(),
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
Broadcaster: eventBroadcaster,
}

config, err := ss.SchedulerConfig()
config, err := schedulerapp.NewSchedulerConfig(ss.Complete())
if err != nil {
t.Fatalf("couldn't make scheduler config: %v", err)
}
@@ -232,34 +235,36 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})

informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

ss := &schedulerapp.SchedulerServer{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
Client: clientSet,
InformerFactory: informerFactory,
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
PodInformer: factory.NewPodInformer(clientSet, 0),
EventClient: clientSet.CoreV1(),
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
Broadcaster: eventBroadcaster,
}

_, err := ss.SchedulerConfig()
_, err := schedulerapp.NewSchedulerConfig(ss.Complete())
if err == nil {
t.Fatalf("Creation of scheduler didn't fail while the policy ConfigMap didn't exist.")
}

@@ -513,33 +518,18 @@ func TestMultiScheduler(t *testing.T) {
}

// 5. create and start a scheduler with name "foo-scheduler"
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
podInformer2 := factory.NewPodInformer(context.clientSet, 0, fooScheduler)
podInformer2 := factory.NewPodInformer(context.clientSet, 0)

schedulerConfigFactory2 := factory.NewConfigFactory(
fooScheduler,
clientSet2,
informerFactory2.Core().V1().Nodes(),
podInformer2,
informerFactory2.Core().V1().PersistentVolumes(),
informerFactory2.Core().V1().PersistentVolumeClaims(),
informerFactory2.Core().V1().ReplicationControllers(),
informerFactory2.Extensions().V1beta1().ReplicaSets(),
informerFactory2.Apps().V1beta1().StatefulSets(),
informerFactory2.Core().V1().Services(),
informerFactory2.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory2.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2)
schedulerConfig2, err := schedulerConfigFactory2.Create()
if err != nil {
t.Errorf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: fooScheduler})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet2.CoreV1().RESTClient()).Events("")})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet2.CoreV1().Events("")})
go podInformer2.Informer().Run(schedulerConfig2.StopEverything)
informerFactory2.Start(schedulerConfig2.StopEverything)

@@ -686,6 +676,7 @@ func TestPDBCache(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: context.ns.Name,
Name: "test-pdb",
UID: types.UID("test-pdb-uid"),
Labels: map[string]string{"tkey1": "tval1", "tkey2": "tval2"},
},
Spec: policy.PodDisruptionBudgetSpec{
@@ -762,3 +753,96 @@ func TestPDBCache(t *testing.T) {
t.Errorf("No PDB was deleted from the cache: %v", err)
}
}

// TestSchedulerInformers tests that the scheduler receives informer events and updates its cache
// when pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "scheduler-informer")
defer cleanupTest(t, context)
cs := context.clientSet

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
}
defaultNodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
}

type nodeConfig struct {
name string
res *v1.ResourceList
}

tests := []struct {
description string
nodes []*nodeConfig
existingPods []*v1.Pod
pod *v1.Pod
preemptedPodIndexes map[int]struct{}
}{
{
description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "pod1",
Namespace: context.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
SchedulerName: "foo-scheduler",
}),
initPausePod(context.clientSet, &pausePodConfig{
Name: "pod2",
Namespace: context.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
SchedulerName: "bar-scheduler",
}),
},
pod: initPausePod(cs, &pausePodConfig{
Name: "unschedulable-pod",
Namespace: context.ns.Name,
Resources: defaultPodRes,
}),
preemptedPodIndexes: map[int]struct{}{2: {}},
},
}

for _, test := range tests {
for _, nodeConf := range test.nodes {
_, err := createNode(cs, nodeConf.name, nodeConf.res)
if err != nil {
t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
}
}

pods := make([]*v1.Pod, len(test.existingPods))
var err error
// Create and run existingPods.
for i, p := range test.existingPods {
if pods[i], err = runPausePod(cs, p); err != nil {
t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
}
}
// Create the new "pod".
unschedulable, err := createPausePod(cs, test.pod)
if err != nil {
t.Errorf("Error while creating new pod: %v", err)
}
if err := waitForPodUnschedulable(cs, unschedulable); err != nil {
t.Errorf("Pod %v got scheduled: %v", unschedulable.Name, err)
}

// Cleanup
pods = append(pods, unschedulable)
cleanupPods(cs, t, pods)
cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
}
}

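TestSchedulerInformers leans on the pausePodConfig fields visible above: a pod that names another scheduler in SchedulerName and is pre-bound via NodeName is never scheduled by the default scheduler, yet still consumes node capacity in its cache once the informers deliver it. A hedged sketch of that key setup (the pod name is illustrative; initPausePod is the helper from util.go):

	// Sketch: a pod owned by a different scheduler, already bound to
	// node-1; the default scheduler only learns of it through informers.
	foreign := initPausePod(cs, &pausePodConfig{
		Name:          "foreign-pod",
		Namespace:     context.ns.Name,
		NodeName:      "node-1",
		SchedulerName: "foo-scheduler",
	})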
142
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go
generated
vendored
@@ -19,8 +19,6 @@ package scheduler
// This file tests the Taint feature.

import (
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
@@ -28,24 +26,18 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
"k8s.io/kubernetes/test/integration/framework"
)

// TestTaintNodeByCondition verifies:
@@ -53,37 +45,41 @@ import (
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func TestTaintNodeByCondition(t *testing.T) {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
defer func() {
if !enabled {
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
}
}()
// Enable TaintNodesByCondition
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")

// Build clientset and informers for controllers.
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)

clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informers := informers.NewSharedInformerFactory(clientset, time.Second)

// Build PodToleration Admission.
admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})

context := initTestMaster(t, "default", admission)

// Build clientset and informers for controllers.
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: context.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)

kubeadmission.WantsInternalKubeClientSet(admission).SetInternalKubeClientSet(internalClientset)
kubeadmission.WantsInternalKubeInformerFactory(admission).SetInternalKubeInformerFactory(internalInformers)

// Start master with admission.
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.AdmissionControl = admission
_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
defer closeFn()

nsName := "default"
controllerCh := make(chan struct{})
defer close(controllerCh)

// Apply feature gates to enable TaintNodesByCondition
algorithmprovider.ApplyFeatureGates()

context = initTestScheduler(t, context, controllerCh, false, nil)
clientset := context.clientSet
informers := context.informerFactory
nsName := context.ns.Name

// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
informers.Core().V1().Pods(),
@@ -109,42 +105,8 @@ func TestTaintNodeByCondition(t *testing.T) {
}
go nc.Run(controllerCh)

// Apply feature gates to enable TaintNodesByCondition
algorithmprovider.ApplyFeatureGates()

// Start scheduler
configurator := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientset,
informers.Core().V1().Nodes(),
informers.Core().V1().Pods(),
informers.Core().V1().PersistentVolumes(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Core().V1().ReplicationControllers(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1beta1().StatefulSets(),
informers.Core().V1().Services(),
informers.Policy().V1beta1().PodDisruptionBudgets(),
informers.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true, // Enable EqualCache by default.
)

sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.StopEverything = controllerCh
cfg.Recorder = &record.FakeRecorder{}
})
if err != nil {
t.Errorf("Failed to create scheduler: %v.", err)
return
}
go sched.Run()

// Wait for all controllers to sync.
informers.Start(controllerCh)
internalInformers.Start(controllerCh)

informers.WaitForCacheSync(controllerCh)
internalInformers.WaitForCacheSync(controllerCh)

// -------------------------------------------
@@ -244,6 +206,9 @@ func TestTaintNodeByCondition(t *testing.T) {
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-1" {
return
}
for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
taint.Effect == v1.TaintEffectNoSchedule {
@@ -294,4 +259,55 @@ func TestTaintNodeByCondition(t *testing.T) {
t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
}
}

// Case 5: Taint node by unschedulable condition
unschedulableNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-2",
},
Spec: v1.NodeSpec{
Unschedulable: true,
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
},
}

nodeInformerCh2 := make(chan bool)
nodeInformer2 := informers.Core().V1().Nodes().Informer()
nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-2" {
return
}

for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeUnschedulable &&
taint.Effect == v1.TaintEffectNoSchedule {
nodeInformerCh2 <- true
break
}
}
},
})

if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
t.Errorf("Case 5: Failed to create node: %v", err)
} else {
select {
case <-time.After(60 * time.Second):
t.Errorf("Case 5: Failed to taint node after 60s.")
case <-nodeInformerCh2:
}
}
}

321
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
@@ -18,16 +18,25 @@ package scheduler

import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"

"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/admission"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
@@ -35,12 +44,15 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/disruption"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"

"net/http/httptest"
imageutils "k8s.io/kubernetes/test/utils/image"
)

type TestContext struct {
@@ -54,50 +66,194 @@ type TestContext struct {
scheduler *scheduler.Scheduler
}

// initTest initializes a test environment and creates a scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
var context TestContext
masterConfig := framework.NewIntegrationTestMasterConfig()
_, context.httpServer, context.closeFn = framework.RunAMaster(masterConfig)

context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), context.httpServer, t)

context.clientSet = clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL})
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, 0)
podInformer := factory.NewPodInformer(context.clientSet, 12*time.Hour, v1.DefaultSchedulerName)
context.schedulerConfigFactory = factory.NewConfigFactory(
v1.DefaultSchedulerName,
context.clientSet,
context.informerFactory.Core().V1().Nodes(),
// createConfiguratorWithPodInformer creates a configurator for the scheduler.
func createConfiguratorWithPodInformer(
schedulerName string,
clientSet clientset.Interface,
podInformer coreinformers.PodInformer,
informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
return factory.NewConfigFactory(
schedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
podInformer,
context.informerFactory.Core().V1().PersistentVolumes(),
context.informerFactory.Core().V1().PersistentVolumeClaims(),
context.informerFactory.Core().V1().ReplicationControllers(),
context.informerFactory.Extensions().V1beta1().ReplicaSets(),
context.informerFactory.Apps().V1beta1().StatefulSets(),
context.informerFactory.Core().V1().Services(),
context.informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
context.informerFactory.Storage().V1().StorageClasses(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true,
utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
false,
)
}

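createConfiguratorWithPodInformer centralizes the long factory.NewConfigFactory argument list that TestMultiScheduler and the init helpers below previously duplicated. Of the two trailing arguments, the first toggles the equivalence-class cache from its feature gate; the final false is, judging by initTestSchedulerWithOptions further down, presumably the new disable-preemption flag. A hedged usage sketch mirroring the TestMultiScheduler hunk above:

	// Sketch: build a second, differently named scheduler configuration
	// from the shared helper; the four inputs are the ones TestMultiScheduler
	// constructs for its "foo-scheduler".
	configurator := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2)
	schedulerConfig2, err := configurator.Create()
	if err != nil {
		t.Errorf("Couldn't create scheduler config: %v", err)
	}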
// initTestMaster initializes a test environment and creates a master with default
// configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
var context TestContext

// 1. Create master
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

masterConfig := framework.NewIntegrationTestMasterConfig()

if admission != nil {
masterConfig.GenericConfig.AdmissionControl = admission
}

_, context.httpServer, context.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)

if nsPrefix != "default" {
context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
} else {
context.ns = framework.CreateTestingNamespace("default", s, t)
}

// 2. Create kubeclient
context.clientSet = clientset.NewForConfigOrDie(
&restclient.Config{
QPS: -1, Host: s.URL,
ContentConfig: restclient.ContentConfig{
GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
},
},
)
return &context
}

// initTestScheduler initializes a test environment and creates a scheduler with default
// configuration.
func initTestScheduler(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
) *TestContext {
// Pod preemption is enabled by the default scheduler configuration, but it only happens when the
// PodPriority feature gate is enabled at the same time.
return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
}

// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
// configuration and other options.
func initTestSchedulerWithOptions(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
disablePreemption bool,
) *TestContext {
// Enable EnableEquivalenceClassCache for all integration tests.
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()

// 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)

var podInformer coreinformers.PodInformer

// create an independent pod informer if required
if setPodInformer {
podInformer = factory.NewPodInformer(context.clientSet, 12*time.Hour)
} else {
podInformer = context.informerFactory.Core().V1().Pods()
}

context.schedulerConfigFactory = createConfiguratorWithPodInformer(
v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory)

var err error
context.schedulerConfig, err = context.schedulerConfigFactory.Create()

if policy != nil {
context.schedulerConfig, err = context.schedulerConfigFactory.CreateFromConfig(*policy)
} else {
context.schedulerConfig, err = context.schedulerConfigFactory.Create()
}

if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}

// set controllerCh if provided.
if controllerCh != nil {
context.schedulerConfig.StopEverything = controllerCh
}

// set the DisablePreemption option
context.schedulerConfig.DisablePreemption = disablePreemption

// run the pod informer if an independent one was created above.
if setPodInformer {
go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
controller.WaitForCacheSync("scheduler", context.schedulerConfig.StopEverything, podInformer.Informer().HasSynced)
}

eventBroadcaster := record.NewBroadcaster()
context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(context.clientSet.CoreV1().RESTClient()).Events("")})
go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
legacyscheme.Scheme,
v1.EventSource{Component: v1.DefaultSchedulerName},
)
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
Interface: context.clientSet.CoreV1().Events(""),
})

context.informerFactory.Start(context.schedulerConfig.StopEverything)
context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)
context.informerFactory.WaitForCacheSync(context.schedulerConfig.StopEverything)

context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{
Config: context.schedulerConfig},
nil...)
if err != nil {
t.Fatalf("Couldn't create scheduler: %v", err)
}
context.scheduler.Run()
return &context
return context
}

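initTestSchedulerWithOptions accepts an optional *schedulerapi.Policy; when one is supplied, the configurator builds the scheduler from that policy (CreateFromConfig) instead of the default algorithm provider. A hedged usage sketch; the policy contents are illustrative, not taken from this diff:

	// Sketch: start a test scheduler restricted to a single predicate.
	// initTestMaster and cleanupTest are the helpers from this file.
	policy := &schedulerapi.Policy{
		Predicates: []schedulerapi.PredicatePolicy{{Name: "PodFitsResources"}},
	}
	context := initTestSchedulerWithOptions(t, initTestMaster(t, "policy", nil), nil, true, policy, false)
	defer cleanupTest(t, context)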
// initDisruptionController initializes and runs a Disruption Controller to properly
// update PodDisruptionBudget objects.
func initDisruptionController(context *TestContext) *disruption.DisruptionController {
informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)

dc := disruption.NewDisruptionController(
informers.Core().V1().Pods(),
informers.Policy().V1beta1().PodDisruptionBudgets(),
informers.Core().V1().ReplicationControllers(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Extensions().V1beta1().Deployments(),
informers.Apps().V1beta1().StatefulSets(),
context.clientSet)

informers.Start(context.schedulerConfig.StopEverything)
informers.WaitForCacheSync(context.schedulerConfig.StopEverything)
go dc.Run(context.schedulerConfig.StopEverything)
return dc
}

// initTest initializes a test environment and creates master and scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
}

// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
return initTestSchedulerWithOptions(
t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
}

// cleanupTest deletes the scheduler and the test namespace. It should be called
@@ -113,7 +269,8 @@ func cleanupTest(t *testing.T, context *TestContext) {

// waitForReflection waits till the passFunc confirms that the object it expects
// to see is in the store. Used to observe reflected events.
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string,
passFunc func(n interface{}) bool) error {
nodes := []*v1.Node{}
err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
n, err := nodeLister.Get(key)
@@ -185,6 +342,12 @@ func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.
return cs.CoreV1().Nodes().Create(n)
}

// updateNodeStatus updates the status of a node.
func updateNodeStatus(cs clientset.Interface, node *v1.Node) error {
_, err := cs.CoreV1().Nodes().UpdateStatus(node)
return err
}

// createNodes creates `numNodes` nodes. The created node names will be in the
// form of "`prefix`-X" where X is an ordinal.
func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
@@ -228,7 +391,7 @@ func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod {
Containers: []v1.Container{
{
Name: conf.Name,
Image: framework.GetPauseImageName(cs),
Image: imageutils.GetPauseImageName(),
},
},
Tolerations: conf.Tolerations,
@@ -252,7 +415,8 @@ func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) {
// createPausePodWithResource creates a pod with "Pause" image and the given
// resources and returns its pointer and error status. The resource list can be
// nil.
func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
func createPausePodWithResource(cs clientset.Interface, podName string,
nsName string, res *v1.ResourceList) (*v1.Pod, error) {
var conf pausePodConfig
if res == nil {
conf = pausePodConfig{
@@ -346,7 +510,8 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
return false, nil
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable, nil
return cond != nil && cond.Status == v1.ConditionFalse &&
cond.Reason == v1.PodReasonUnschedulable, nil
}
}

@@ -374,6 +539,59 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}

// waitCachedPDBsStable waits for PDBs in the scheduler cache to have a "CurrentHealthy" status equal to
// the expected values.
func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
return false, err
}
if len(cachedPDBs) != len(pdbs) {
return false, nil
}
for i, pdb := range pdbs {
found := false
for _, cpdb := range cachedPDBs {
if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace {
found = true
if cpdb.Status.CurrentHealthy != pdbPodNum[i] {
return false, nil
}
}
}
if !found {
return false, nil
}
}
return true, nil
})
}

// waitCachedPodsStable waits until the scheduler cache has the given pods.
func waitCachedPodsStable(context *TestContext, pods []*v1.Pod) error {
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
cachedPods, err := context.scheduler.Config().SchedulerCache.List(labels.Everything())
if err != nil {
return false, err
}
if len(pods) != len(cachedPods) {
return false, nil
}
for _, p := range pods {
actualPod, err1 := context.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
if err1 != nil {
return false, err1
}
cachedPod, err2 := context.scheduler.Config().SchedulerCache.GetPod(actualPod)
if err2 != nil || cachedPod == nil {
return false, err2
}
}
return true, nil
})
}

// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
@@ -388,8 +606,35 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
}
}
for _, p := range pods {
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, p.Namespace, p.Name)); err != nil {
if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
podDeleted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
}
}
}

// noPodsInNamespace returns true if there are no pods in the given namespace.
func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.ConditionFunc {
return func() (bool, error) {
pods, err := c.CoreV1().Pods(podNamespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}

return len(pods.Items) == 0, nil
}
}

// cleanupPodsInNamespace deletes the pods in the given namespace and waits for them to
// be actually deleted.
func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
if err := cs.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}); err != nil {
t.Errorf("error while deleting pods in namespace %v: %v", ns, err)
return
}

if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
noPodsInNamespace(cs, ns)); err != nil {
t.Errorf("error while waiting for pods in namespace %v to be deleted: %v", ns, err)
}
}

245
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
@@ -20,8 +20,6 @@ package scheduler

import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
@@ -33,19 +31,11 @@ import (
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)

type testConfig struct {
@@ -75,94 +65,107 @@ const (
nodeAffinityLabelKey = "kubernetes.io/hostname"
)

type testPV struct {
name string
scMode storagev1.VolumeBindingMode
preboundPVC string
node string
}

type testPVC struct {
name string
scMode storagev1.VolumeBindingMode
preboundPV string
}

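The testPV and testPVC structs let the table below use compact positional literals in place of repeated makePV/makePVC calls; reading them requires keeping the field order in mind. A short illustration, with values taken from the first table entry:

	// Sketch: positional literals; testPV is {name, scMode, preboundPVC, node}
	// and testPVC is {name, scMode, preboundPV}.
	pv := &testPV{"pv-i-canbind", modeImmediate, "", node1}
	pvc := &testPVC{"pvc-i-canbind", modeImmediate, ""}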
func TestVolumeBinding(t *testing.T) {
|
||||
config := setupCluster(t, "volume-scheduling", 2)
defer config.teardown()

cases := map[string]struct {
pod *v1.Pod
pvs []*v1.PersistentVolume
pvcs []*v1.PersistentVolumeClaim
pvs []*testPV
pvcs []*testPVC
// Create these, but they should not be bound in the end
unboundPvcs []*v1.PersistentVolumeClaim
unboundPvs []*v1.PersistentVolume
unboundPvcs []*testPVC
unboundPvs []*testPV
shouldFail bool
}{
"immediate can bind": {
pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")},
pvs: []*testPV{{"pv-i-canbind", modeImmediate, "", node1}},
pvcs: []*testPVC{{"pvc-i-canbind", modeImmediate, ""}},
},
"immediate cannot bind": {
pod: makePod("pod-i-cannotbind", config.ns, []string{"pvc-i-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-cannotbind", config.ns, &classImmediate, "")},
unboundPvcs: []*testPVC{{"pvc-i-cannotbind", modeImmediate, ""}},
shouldFail: true,
},
"immediate pvc prebound": {
pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-prebound", config.ns, &classImmediate, "pv-i-pvc-prebound")},
pvs: []*testPV{{"pv-i-pvc-prebound", modeImmediate, "", node1}},
pvcs: []*testPVC{{"pvc-i-prebound", modeImmediate, "pv-i-pvc-prebound"}},
},
"immediate pv prebound": {
pod: makePod("pod-i-pv-prebound", config.ns, []string{"pvc-i-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-pv-prebound", config.ns, &classImmediate, "")},
pvs: []*testPV{{"pv-i-prebound", modeImmediate, "pvc-i-pv-prebound", node1}},
pvcs: []*testPVC{{"pvc-i-pv-prebound", modeImmediate, ""}},
},
"wait can bind": {
pod: makePod("pod-w-canbind", config.ns, []string{"pvc-w-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")},
pvs: []*testPV{{"pv-w-canbind", modeWait, "", node1}},
pvcs: []*testPVC{{"pvc-w-canbind", modeWait, ""}},
},
"wait cannot bind": {
pod: makePod("pod-w-cannotbind", config.ns, []string{"pvc-w-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-cannotbind", config.ns, &classWait, "")},
unboundPvcs: []*testPVC{{"pvc-w-cannotbind", modeWait, ""}},
shouldFail: true,
},
"wait pvc prebound": {
pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-prebound", config.ns, &classWait, "pv-w-pvc-prebound")},
pvs: []*testPV{{"pv-w-pvc-prebound", modeWait, "", node1}},
pvcs: []*testPVC{{"pvc-w-prebound", modeWait, "pv-w-pvc-prebound"}},
},
"wait pv prebound": {
pod: makePod("pod-w-pv-prebound", config.ns, []string{"pvc-w-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-pv-prebound", config.ns, &classWait, "")},
pvs: []*testPV{{"pv-w-prebound", modeWait, "pvc-w-pv-prebound", node1}},
pvcs: []*testPVC{{"pvc-w-pv-prebound", modeWait, ""}},
},
"wait can bind two": {
pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-2", classWait, "", "", node2),
makePV(t, "pv-w-canbind-3", classWait, "", "", node2),
pvs: []*testPV{
{"pv-w-canbind-2", modeWait, "", node2},
{"pv-w-canbind-3", modeWait, "", node2},
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-2", config.ns, &classWait, ""),
makePVC("pvc-w-canbind-3", config.ns, &classWait, ""),
pvcs: []*testPVC{
{"pvc-w-canbind-2", modeWait, ""},
{"pvc-w-canbind-3", modeWait, ""},
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-5", classWait, "", "", node1),
unboundPvs: []*testPV{
{"pv-w-canbind-5", modeWait, "", node1},
},
},
"wait cannot bind two": {
pod: makePod("pod-w-cannotbind-2", config.ns, []string{"pvc-w-cannotbind-1", "pvc-w-cannotbind-2"}),
unboundPvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-cannotbind-1", config.ns, &classWait, ""),
makePVC("pvc-w-cannotbind-2", config.ns, &classWait, ""),
unboundPvcs: []*testPVC{
{"pvc-w-cannotbind-1", modeWait, ""},
{"pvc-w-cannotbind-2", modeWait, ""},
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-cannotbind-1", classWait, "", "", node2),
makePV(t, "pv-w-cannotbind-2", classWait, "", "", node1),
unboundPvs: []*testPV{
{"pv-w-cannotbind-1", modeWait, "", node2},
{"pv-w-cannotbind-2", modeWait, "", node1},
},
shouldFail: true,
},
"mix immediate and wait": {
pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-4", classWait, "", "", node1),
makePV(t, "pv-i-canbind-2", classImmediate, "", "", node1),
pvs: []*testPV{
{"pv-w-canbind-4", modeWait, "", node1},
{"pv-i-canbind-2", modeImmediate, "", node1},
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-4", config.ns, &classWait, ""),
makePVC("pvc-i-canbind-2", config.ns, &classImmediate, ""),
pvcs: []*testPVC{
{"pvc-w-canbind-4", modeWait, ""},
{"pvc-i-canbind-2", modeImmediate, ""},
},
},
}
@ -170,26 +173,41 @@ func TestVolumeBinding(t *testing.T) {
for name, test := range cases {
glog.Infof("Running test %v", name)

// Create two StorageClasses
suffix := rand.String(4)
classes := map[storagev1.VolumeBindingMode]*storagev1.StorageClass{}
classes[modeImmediate] = makeStorageClass(fmt.Sprintf("immediate-%v", suffix), &modeImmediate)
classes[modeWait] = makeStorageClass(fmt.Sprintf("wait-%v", suffix), &modeWait)
for _, sc := range classes {
if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
}
}

// Create PVs
for _, pv := range test.pvs {
for _, pvConfig := range test.pvs {
pv := makePV(pvConfig.name, classes[pvConfig.scMode].Name, pvConfig.preboundPVC, config.ns, pvConfig.node)
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}
}

for _, pv := range test.unboundPvs {
for _, pvConfig := range test.unboundPvs {
pv := makePV(pvConfig.name, classes[pvConfig.scMode].Name, pvConfig.preboundPVC, config.ns, pvConfig.node)
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}
}

// Create PVCs
for _, pvc := range test.pvcs {
for _, pvcConfig := range test.pvcs {
pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scMode].Name, pvcConfig.preboundPV)
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
}
for _, pvc := range test.unboundPvcs {
for _, pvcConfig := range test.unboundPvcs {
pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scMode].Name, pvcConfig.preboundPV)
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
@ -211,23 +229,20 @@ func TestVolumeBinding(t *testing.T) {

// Validate PVC/PV binding
for _, pvc := range test.pvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
validatePVCPhase(t, config.client, pvc.name, config.ns, v1.ClaimBound)
}
for _, pvc := range test.unboundPvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimPending)
validatePVCPhase(t, config.client, pvc.name, config.ns, v1.ClaimPending)
}
for _, pv := range test.pvs {
validatePVPhase(t, config.client, pv, v1.VolumeBound)
validatePVPhase(t, config.client, pv.name, v1.VolumeBound)
}
for _, pv := range test.unboundPvs {
validatePVPhase(t, config.client, pv, v1.VolumeAvailable)
validatePVPhase(t, config.client, pv.name, v1.VolumeAvailable)
}

// TODO: validate events on Pods and PVCs

config.client.CoreV1().Pods(config.ns).DeleteCollection(deleteOption, metav1.ListOptions{})
config.client.CoreV1().PersistentVolumeClaims(config.ns).DeleteCollection(deleteOption, metav1.ListOptions{})
config.client.CoreV1().PersistentVolumes().DeleteCollection(deleteOption, metav1.ListOptions{})
// Force delete objects, but they still may not be immediately removed
deleteTestObjects(config.client, config.ns, deleteOption)
}
}

@ -240,7 +255,7 @@ func TestVolumeBindingStress(t *testing.T) {
pvs := []*v1.PersistentVolume{}
pvcs := []*v1.PersistentVolumeClaim{}
for i := 0; i < podLimit*volsPerPod; i++ {
pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
pv := makePV(fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
@ -271,27 +286,27 @@ func TestVolumeBindingStress(t *testing.T) {

// Validate Pods scheduled
for _, pod := range pods {
if err := waitForPodToSchedule(config.client, pod); err != nil {
// Use increased timeout for stress test because there is a higher chance of
// PV sync error
if err := waitForPodToScheduleWithTimeout(config.client, pod, 60*time.Second); err != nil {
t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
}
}

// Validate PVC/PV binding
for _, pvc := range pvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
validatePVCPhase(t, config.client, pvc.Name, config.ns, v1.ClaimBound)
}
for _, pv := range pvs {
validatePVPhase(t, config.client, pv, v1.VolumeBound)
validatePVPhase(t, config.client, pv.Name, v1.VolumeBound)
}

// TODO: validate events on Pods and PVCs
}

func TestPVAffinityConflict(t *testing.T) {
config := setupCluster(t, "volume-scheduling", 3)
defer config.teardown()

pv := makePV(t, "local-pv", classImmediate, "", "", node1)
pv := makePV("local-pv", classImmediate, "", "", node1)
pvc := makePVC("local-pvc", config.ns, &classImmediate, "")

// Create PV
@ -347,27 +362,17 @@ func TestPVAffinityConflict(t *testing.T) {
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

// Enable feature gates
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true")

// Build clientset and informers for controllers.
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informers := informers.NewSharedInformerFactory(clientset, time.Second)

// Start master
masterConfig := framework.NewIntegrationTestMasterConfig()

_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
ns := framework.CreateTestingNamespace(nsName, s, t).Name

controllerCh := make(chan struct{})

context := initTestScheduler(t, initTestMaster(t, nsName, nil), controllerCh, false, nil)

clientset := context.clientSet
ns := context.ns.Name
informers := context.informerFactory

// Start PV controller for volume binding.
params := persistentvolume.ControllerParameters{
KubeClient: clientset,
@ -379,7 +384,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
ClassInformer: informers.Storage().V1().StorageClasses(),
PodInformer: informers.Core().V1().Pods(),
EventRecorder: nil, // TODO: add one so we can test PV events
NodeInformer: informers.Core().V1().Nodes(),
EnableDynamicProvisioning: true,
}
ctrl, err := persistentvolume.NewController(params)
@ -388,40 +393,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
}
go ctrl.Run(controllerCh)

// Start scheduler
configurator := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientset,
informers.Core().V1().Nodes(),
informers.Core().V1().Pods(),
informers.Core().V1().PersistentVolumes(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Core().V1().ReplicationControllers(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1beta1().StatefulSets(),
informers.Core().V1().Services(),
informers.Policy().V1beta1().PodDisruptionBudgets(),
informers.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true, // Enable EqualCache by default.
)

eventBroadcaster := record.NewBroadcaster()
sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.StopEverything = controllerCh
cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")})
})
if err != nil {
t.Fatalf("Failed to create scheduler: %v.", err)
}

go sched.Run()

// Waiting for all controller sync.
informers.Start(controllerCh)
informers.WaitForCacheSync(controllerCh)

// Create shared objects
// Create nodes
for i := 0; i < numberOfNodes; i++ {
@ -466,18 +437,20 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
ns: ns,
stop: controllerCh,
teardown: func() {
clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{})
clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{})
clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{})
clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
close(controllerCh)
closeFn()
deleteTestObjects(clientset, ns, nil)
cleanupTest(t, context)
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false")
},
}
}

func deleteTestObjects(client clientset.Interface, ns string, option *metav1.DeleteOptions) {
client.CoreV1().Pods(ns).DeleteCollection(option, metav1.ListOptions{})
client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(option, metav1.ListOptions{})
client.CoreV1().PersistentVolumes().DeleteCollection(option, metav1.ListOptions{})
client.StorageV1().StorageClasses().DeleteCollection(option, metav1.ListOptions{})
}

func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
return &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@ -488,7 +461,7 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1
}
}

func makePV(t *testing.T, name, scName, pvcName, ns, node string) *v1.PersistentVolume {
func makePV(name, scName, pvcName, ns, node string) *v1.PersistentVolume {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -585,25 +558,25 @@ func makePod(name, ns string, pvcs []string) *v1.Pod {
}
}

func validatePVCPhase(t *testing.T, client clientset.Interface, pvc *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase) {
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
func validatePVCPhase(t *testing.T, client clientset.Interface, pvcName string, ns string, phase v1.PersistentVolumeClaimPhase) {
claim, err := client.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get PVC %v/%v: %v", pvc.Namespace, pvc.Name, err)
t.Errorf("Failed to get PVC %v/%v: %v", ns, pvcName, err)
}

if claim.Status.Phase != phase {
t.Errorf("PVC %v/%v phase not %v, got %v", pvc.Namespace, pvc.Name, phase, claim.Status.Phase)
t.Errorf("PVC %v/%v phase not %v, got %v", ns, pvcName, phase, claim.Status.Phase)
}
}

func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.PersistentVolume, phase v1.PersistentVolumePhase) {
pv, err := client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
func validatePVPhase(t *testing.T, client clientset.Interface, pvName string, phase v1.PersistentVolumePhase) {
pv, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get PV %v: %v", pv.Name, err)
t.Errorf("Failed to get PV %v: %v", pvName, err)
}

if pv.Status.Phase != phase {
t.Errorf("PV %v phase not %v, got %v", pv.Name, phase, pv.Status.Phase)
t.Errorf("PV %v phase not %v, got %v", pvName, phase, pv.Status.Phase)
}
}
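The refactor above swaps fully built *v1.PersistentVolume and *v1.PersistentVolumeClaim fixtures for small testPV/testPVC description structs that the test body expands once the per-run StorageClasses exist. A minimal sketch of that pattern, assuming the makePV/makePVC helpers and testConfig from this diff (the condensed helper below is illustrative, not part of the upstream change):

// createTestFixtures expands lightweight descriptions into API objects
// once the randomly suffixed StorageClass names for this run are known.
func createTestFixtures(t *testing.T, config *testConfig, classes map[storagev1.VolumeBindingMode]*storagev1.StorageClass, pvs []*testPV, pvcs []*testPVC) {
	for _, c := range pvs {
		pv := makePV(c.name, classes[c.scMode].Name, c.preboundPVC, config.ns, c.node)
		if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
		}
	}
	for _, c := range pvcs {
		pvc := makePVC(c.name, config.ns, &classes[c.scMode].Name, c.preboundPV)
		if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
			t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
		}
	}
}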
3
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
@ -14,11 +14,10 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
4
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
@ -50,10 +50,6 @@ var (
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sample-node-",
},
Spec: v1.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
7
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
package benchmark

import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/test/integration/util"
@ -37,11 +36,11 @@ func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) {
apiURL, apiShutdown := util.StartApiserver()
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()},
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
QPS: 5000.0,
Burst: 5000,
})
schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet, true)
schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet)

shutdownFunc := func() {
schedulerShutdown()
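One change recurs throughout this update: integration tests stop deriving the core GroupVersion from pkg/api/testapi and instead spell it out with apimachinery's schema package. A minimal standalone sketch of the new client construction (the function name and host value are placeholders, not upstream code):

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

// newCoreV1Client builds a clientset pinned to the core "v1" API group
// without going through the internal testapi helpers.
func newCoreV1Client(host string) *clientset.Clientset {
	return clientset.NewForConfigOrDie(&restclient.Config{
		Host: host,
		ContentConfig: restclient.ContentConfig{
			GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
		},
	})
}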
2
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
@ -14,11 +14,11 @@ go_test(
],
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
4
vendor/k8s.io/kubernetes/test/integration/secrets/secrets_test.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/integration/secrets/secrets_test.go
generated
vendored
@ -23,9 +23,9 @@ import (

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
@ -41,7 +41,7 @@ func TestSecrets(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("secret", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
2
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
@ -14,7 +14,6 @@ go_test(
],
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/controller:go_default_library",
@ -27,6 +26,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
8
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
8
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
@ -33,6 +33,7 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/authenticator"
@ -44,7 +45,6 @@ import (
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller"
@ -359,11 +359,11 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
}))

// Anonymous client config
clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}
clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}
// Root client
// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}, BearerToken: rootToken})
internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}, BearerToken: rootToken})
rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
// Set up two authenticators:
// 1. A token authenticator that maps the rootToken to the "root" user
// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
57
vendor/k8s.io/kubernetes/test/integration/statefulset/BUILD
generated
vendored
Normal file
57
vendor/k8s.io/kubernetes/test/integration/statefulset/BUILD
generated
vendored
Normal file
@ -0,0 +1,57 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["util.go"],
importpath = "k8s.io/kubernetes/test/integration/statefulset",
visibility = ["//visibility:public"],
deps = [
#"//pkg/api:go_default_library",
"//pkg/controller/statefulset:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)

go_test(
name = "go_default_test",
size = "large",
srcs = [
"main_test.go",
"statefulset_test.go",
],
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
6
vendor/k8s.io/kubernetes/test/integration/statefulset/OWNERS
generated
vendored
Normal file
6
vendor/k8s.io/kubernetes/test/integration/statefulset/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
reviewers:
- barney-s
- crimsonfaith91
approvers:
- barney-s
- crimsonfaith91
27
vendor/k8s.io/kubernetes/test/integration/statefulset/main_test.go
generated
vendored
Normal file
27
vendor/k8s.io/kubernetes/test/integration/statefulset/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statefulset

import (
"testing"

"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}
147
vendor/k8s.io/kubernetes/test/integration/statefulset/statefulset_test.go
generated
vendored
Normal file
147
vendor/k8s.io/kubernetes/test/integration/statefulset/statefulset_test.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statefulset

import (
"fmt"
"testing"

"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
)

func TestSpecReplicasChange(t *testing.T) {
s, closeFn, rm, informers, c := scSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-spec-replicas-change", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(rm, informers)
defer close(stopCh)

createHeadlessService(t, c, newHeadlessService(ns.Name))
sts := newSTS("sts", ns.Name, 2)
stss, _ := createSTSsPods(t, c, []*v1beta1.StatefulSet{sts}, []*v1.Pod{})
sts = stss[0]
waitSTSStable(t, c, sts)

// Update .Spec.Replicas and verify .Status.Replicas is changed accordingly
scaleSTS(t, c, sts, 3)
scaleSTS(t, c, sts, 0)
scaleSTS(t, c, sts, 2)

// Add a template annotation change to test that the STS's status updates
// without a .Spec.Replicas change
stsClient := c.AppsV1beta1().StatefulSets(ns.Name)
var oldGeneration int64
newSTS := updateSTS(t, stsClient, sts.Name, func(sts *v1beta1.StatefulSet) {
oldGeneration = sts.Generation
sts.Spec.Template.Annotations = map[string]string{"test": "annotation"}
})
savedGeneration := newSTS.Generation
if savedGeneration == oldGeneration {
t.Fatalf("failed to verify .Generation has incremented for sts %s", sts.Name)
}

if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
newSTS, err := stsClient.Get(sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return *newSTS.Status.ObservedGeneration >= savedGeneration, nil
}); err != nil {
t.Fatalf("failed to verify .Status.ObservedGeneration has incremented for sts %s: %v", sts.Name, err)
}
}

func TestDeletingAndFailedPods(t *testing.T) {
s, closeFn, rm, informers, c := scSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-deleting-and-failed-pods", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(rm, informers)
defer close(stopCh)

labelMap := labelMap()
sts := newSTS("sts", ns.Name, 2)
stss, _ := createSTSsPods(t, c, []*v1beta1.StatefulSet{sts}, []*v1.Pod{})
sts = stss[0]
waitSTSStable(t, c, sts)

// Verify STS creates 2 pods
podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap)
if len(pods.Items) != 2 {
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
}

// Set first pod as deleting pod
// Set finalizers for the pod to simulate pending deletion status
deletingPod := &pods.Items[0]
updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
pod.Finalizers = []string{"fake.example.com/blockDeletion"}
})
if err := c.Core().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil {
t.Fatalf("error deleting pod %s: %v", deletingPod.Name, err)
}

// Set second pod as failed pod
failedPod := &pods.Items[1]
updatePodStatus(t, podClient, failedPod.Name, func(pod *v1.Pod) {
pod.Status.Phase = v1.PodFailed
})

if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
// Verify only 2 pods exist: deleting pod and new pod replacing failed pod
pods = getPods(t, podClient, labelMap)
if len(pods.Items) != 2 {
return false, nil
}
// Verify deleting pod still exists
// Immediately return false with an error if it does not exist
if pods.Items[0].UID != deletingPod.UID && pods.Items[1].UID != deletingPod.UID {
return false, fmt.Errorf("expected deleting pod %s still exists, but it is not found", deletingPod.Name)
}
// Verify failed pod does not exist anymore
if pods.Items[0].UID == failedPod.UID || pods.Items[1].UID == failedPod.UID {
return false, nil
}
// Verify both pods have non-failed status
return pods.Items[0].Status.Phase != v1.PodFailed && pods.Items[1].Status.Phase != v1.PodFailed, nil
}); err != nil {
t.Fatalf("failed to verify failed pod %s has been replaced with a new non-failed pod, and deleting pod %s survives: %v", failedPod.Name, deletingPod.Name, err)
}

// Remove finalizers of deleting pod to simulate successful deletion
updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
pod.Finalizers = []string{}
})

if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
// Verify only 2 pods exist: new non-deleting pod replacing deleting pod and the non-failed pod
pods = getPods(t, podClient, labelMap)
if len(pods.Items) != 2 {
return false, nil
}
// Verify deleting pod does not exist anymore
return pods.Items[0].UID != deletingPod.UID && pods.Items[1].UID != deletingPod.UID, nil
}); err != nil {
t.Fatalf("failed to verify deleting pod %s has been replaced with a new non-deleting pod: %v", deletingPod.Name, err)
}
}
319
vendor/k8s.io/kubernetes/test/integration/statefulset/util.go
generated
vendored
Normal file
319
vendor/k8s.io/kubernetes/test/integration/statefulset/util.go
generated
vendored
Normal file
@ -0,0 +1,319 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statefulset

import (
"fmt"
"net/http/httptest"
"testing"
"time"

"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
typedv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
//svc "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/controller/statefulset"
"k8s.io/kubernetes/test/integration/framework"
)

const (
pollInterval = 100 * time.Millisecond
pollTimeout = 60 * time.Second

fakeImageName = "fake-name"
fakeImage = "fakeimage"
)

type statefulsetTester struct {
t *testing.T
c clientset.Interface
service *v1.Service
statefulset *v1beta1.StatefulSet
}

func labelMap() map[string]string {
return map[string]string{"foo": "bar"}
}

// newHeadlessService returns a headless service with a fake name for the StatefulSet to be created soon
func newHeadlessService(namespace string) *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "fake-service-name",
},
Spec: v1.ServiceSpec{
ClusterIP: "None",
Ports: []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
},
Selector: labelMap(),
},
}
}

// newSTS returns a StatefulSet with a fake container image
func newSTS(name, namespace string, replicas int) *v1beta1.StatefulSet {
replicasCopy := int32(replicas)
return &v1beta1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1beta1.StatefulSetSpec{
PodManagementPolicy: v1beta1.ParallelPodManagement,
Replicas: &replicasCopy,
Selector: &metav1.LabelSelector{
MatchLabels: labelMap(),
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labelMap(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
VolumeMounts: []v1.VolumeMount{
{Name: "datadir", MountPath: "/data/"},
{Name: "home", MountPath: "/home"},
},
},
},
Volumes: []v1.Volume{
{
Name: "datadir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", "datadir"),
},
},
},
{
Name: "home",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", "home"),
},
},
},
},
},
},
ServiceName: "fake-service-name",
UpdateStrategy: v1beta1.StatefulSetUpdateStrategy{
Type: v1beta1.RollingUpdateStatefulSetStrategyType,
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
// for volume mount "datadir"
newStatefulSetPVC("fake-pvc-name"),
},
},
}
}

func newStatefulSetPVC(name string) v1.PersistentVolumeClaim {
return v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "anything",
},
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
},
},
},
}
}

// scSetup sets up necessities for Statefulset integration test, including master, apiserver, informers, and clientset
func scSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)

config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "statefulset-informers")), resyncPeriod)

sc := statefulset.NewStatefulSetController(
informers.Core().V1().Pods(),
informers.Apps().V1().StatefulSets(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Apps().V1().ControllerRevisions(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "statefulset-controller")),
)

return s, closeFn, sc, informers, clientSet
}

// Run STS controller and informers
func runControllerAndInformers(sc *statefulset.StatefulSetController, informers informers.SharedInformerFactory) chan struct{} {
stopCh := make(chan struct{})
informers.Start(stopCh)
go sc.Run(5, stopCh)
return stopCh
}

func createHeadlessService(t *testing.T, clientSet clientset.Interface, headlessService *v1.Service) {
_, err := clientSet.Core().Services(headlessService.Namespace).Create(headlessService)
if err != nil {
t.Fatalf("failed creating headless service: %v", err)
}
}

func createSTSsPods(t *testing.T, clientSet clientset.Interface, stss []*v1beta1.StatefulSet, pods []*v1.Pod) ([]*v1beta1.StatefulSet, []*v1.Pod) {
var createdSTSs []*v1beta1.StatefulSet
var createdPods []*v1.Pod
for _, sts := range stss {
createdSTS, err := clientSet.AppsV1beta1().StatefulSets(sts.Namespace).Create(sts)
if err != nil {
t.Fatalf("failed to create sts %s: %v", sts.Name, err)
}
createdSTSs = append(createdSTSs, createdSTS)
}
for _, pod := range pods {
createdPod, err := clientSet.Core().Pods(pod.Namespace).Create(pod)
if err != nil {
t.Fatalf("failed to create pod %s: %v", pod.Name, err)
}
createdPods = append(createdPods, createdPod)
}

return createdSTSs, createdPods
}

// Verify .Status.Replicas is equal to .Spec.Replicas
func waitSTSStable(t *testing.T, clientSet clientset.Interface, sts *v1beta1.StatefulSet) {
stsClient := clientSet.AppsV1beta1().StatefulSets(sts.Namespace)
desiredGeneration := sts.Generation
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
newSTS, err := stsClient.Get(sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return newSTS.Status.Replicas == *newSTS.Spec.Replicas && *newSTS.Status.ObservedGeneration >= desiredGeneration, nil
}); err != nil {
t.Fatalf("failed to verify .Status.Replicas is equal to .Spec.Replicas for sts %s: %v", sts.Name, err)
}
}

func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod {
var pod *v1.Pod
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newPod, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
return err
}
updateFunc(newPod)
pod, err = podClient.Update(newPod)
return err
}); err != nil {
t.Fatalf("failed to update pod %s: %v", podName, err)
}
return pod
}

func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, podName string, updateStatusFunc func(*v1.Pod)) *v1.Pod {
var pod *v1.Pod
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newPod, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
return err
}
updateStatusFunc(newPod)
pod, err = podClient.UpdateStatus(newPod)
return err
}); err != nil {
t.Fatalf("failed to update status of pod %s: %v", podName, err)
}
return pod
}

func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList {
podSelector := labels.Set(labelMap).AsSelector()
options := metav1.ListOptions{LabelSelector: podSelector.String()}
pods, err := podClient.List(options)
if err != nil {
t.Fatalf("failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err)
}
if pods == nil {
t.Fatalf("obtained a nil list of pods")
}
return pods
}

func updateSTS(t *testing.T, stsClient typedv1beta1.StatefulSetInterface, stsName string, updateFunc func(*v1beta1.StatefulSet)) *v1beta1.StatefulSet {
var sts *v1beta1.StatefulSet
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newSTS, err := stsClient.Get(stsName, metav1.GetOptions{})
if err != nil {
return err
}
updateFunc(newSTS)
sts, err = stsClient.Update(newSTS)
return err
}); err != nil {
t.Fatalf("failed to update sts %s: %v", stsName, err)
}
return sts
}

// Update .Spec.Replicas to replicas and verify .Status.Replicas is changed accordingly
func scaleSTS(t *testing.T, c clientset.Interface, sts *v1beta1.StatefulSet, replicas int32) {
stsClient := c.AppsV1beta1().StatefulSets(sts.Namespace)
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newSTS, err := stsClient.Get(sts.Name, metav1.GetOptions{})
if err != nil {
return err
}
*newSTS.Spec.Replicas = replicas
sts, err = stsClient.Update(newSTS)
return err
}); err != nil {
t.Fatalf("failed to update .Spec.Replicas to %d for sts %s: %v", replicas, sts.Name, err)
}
waitSTSStable(t, c, sts)
}
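updatePod, updatePodStatus, updateSTS, and scaleSTS above all share one get-mutate-update shape built on client-go's retry package. Stripped to its core, and with name and mutate standing in as placeholders, the pattern looks like this:

// Re-read the object and re-apply the mutation whenever the API server
// rejects the write with a ResourceVersion conflict; other errors abort.
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
	cur, err := stsClient.Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	mutate(cur)                    // caller-supplied edit of the live object
	_, err = stsClient.Update(cur) // a conflict here triggers another attempt
	return err
}); err != nil {
	t.Fatalf("update failed after retries: %v", err)
}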
2
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
@ -14,12 +14,12 @@ go_test(
],
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
4
vendor/k8s.io/kubernetes/test/integration/storageclasses/storage_classes_test.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/integration/storageclasses/storage_classes_test.go
generated
vendored
@ -25,9 +25,9 @@ import (
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/integration/framework"
)

@ -38,7 +38,7 @@ func TestStorageClasses(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("storageclass", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
8
vendor/k8s.io/kubernetes/test/integration/tls/BUILD
generated
vendored
8
vendor/k8s.io/kubernetes/test/integration/tls/BUILD
generated
vendored
@ -9,14 +9,8 @@ go_test(
],
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-apiserver/app/options:go_default_library",
"//cmd/kube-apiserver/app/testing:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

109
vendor/k8s.io/kubernetes/test/integration/tls/ciphers_test.go
generated
vendored
109
vendor/k8s.io/kubernetes/test/integration/tls/ciphers_test.go
generated
vendored
@ -19,108 +19,26 @@ package tls
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"sync/atomic"
"strings"
"testing"
"time"

"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/test/integration/framework"
)

func runBasicSecureAPIServer(t *testing.T, ciphers []string) (uint32, error) {
certDir, _ := ioutil.TempDir("", "test-integration-tls")
defer os.RemoveAll(certDir)
_, defaultServiceClusterIPRange, _ := net.ParseCIDR("10.0.0.0/24")
kubeClientConfigValue := atomic.Value{}
var kubePort uint32

go func() {
listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}

atomic.StoreUint32(&kubePort, uint32(port))

kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
kubeAPIServerOptions.SecureServing.BindPort = port
kubeAPIServerOptions.SecureServing.Listener = listener
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.SecureServing.CipherSuites = ciphers
kubeAPIServerOptions.InsecureServing.BindPort = 0
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange

tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig.ExtraConfig.EnableCoreControllers = false
kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)

kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
if err != nil {
t.Fatal(err)
}

if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Log(err)
}
time.Sleep(100 * time.Millisecond)
}()

// Ensure server is ready
err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
obj := kubeClientConfigValue.Load()
if obj == nil {
return false, nil
}
kubeClientConfig := kubeClientConfigValue.Load().(*rest.Config)
kubeClientConfig.ContentType = ""
kubeClientConfig.AcceptContentTypes = ""
kubeClient, err := client.NewForConfig(kubeClientConfig)
if err != nil {
// this happens because we race the API server start
t.Log(err)
return false, nil
}
if _, err := kubeClient.Discovery().ServerVersion(); err != nil {
return false, nil
}
return true, nil
})
if err != nil {
return 0, err
}

securePort := atomic.LoadUint32(&kubePort)
return securePort, nil
func runBasicSecureAPIServer(t *testing.T, ciphers []string) (kubeapiservertesting.TearDownFunc, int) {
flags := []string{"--tls-cipher-suites", strings.Join(ciphers, ",")}
testServer := kubeapiservertesting.StartTestServerOrDie(t, nil, flags, framework.SharedEtcd())
return testServer.TearDownFn, testServer.ServerOpts.SecureServing.BindPort
}

func TestAPICiphers(t *testing.T) {

basicServerCiphers := []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"}

kubePort, err := runBasicSecureAPIServer(t, basicServerCiphers)
if err != nil {
t.Fatal(err)
}

tearDown, port := runBasicSecureAPIServer(t, basicServerCiphers)
defer tearDown()
tests := []struct {
clientCiphers []uint16
expectedError bool
@ -138,11 +56,11 @@ func TestAPICiphers(t *testing.T) {
}

for i, test := range tests {
runTestAPICiphers(t, i, kubePort, test.clientCiphers, test.expectedError)
runTestAPICiphers(t, i, port, test.clientCiphers, test.expectedError)
}
}

func runTestAPICiphers(t *testing.T, testID int, kubePort uint32, clientCiphers []uint16, expectedError bool) {
func runTestAPICiphers(t *testing.T, testID int, kubePort int, clientCiphers []uint16, expectedError bool) {

tr := &http.Transport{
TLSClientConfig: &tls.Config{
@ -156,14 +74,13 @@ func runTestAPICiphers(t *testing.T, testID int, kubePort uint32, clientCiphers
t.Fatal(err)
}
resp, err := client.Do(req)
if err == nil {
defer resp.Body.Close()
}

if expectedError == true && err == nil {
t.Fatalf("%d: expecting error for cipher test, client cipher is supported and it shouldn't be", testID)
} else if err != nil && expectedError == false {
t.Fatalf("%d: not expecting error by client with cipher failed: %+v", testID, err)
}

if err == nil {
defer resp.Body.Close()
}
}
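The rewritten test keeps the same core idea: a TLS handshake succeeds only when the client's and server's cipher-suite lists intersect. A standalone sketch of that check, with addr standing in for the apiserver's secure endpoint (this is an illustration, not the upstream test body):

tr := &http.Transport{
	TLSClientConfig: &tls.Config{
		InsecureSkipVerify: true, // the test server uses a self-signed cert
		CipherSuites:       []uint16{tls.TLS_RSA_WITH_AES_128_GCM_SHA256},
	},
}
httpClient := &http.Client{Transport: tr, Timeout: 5 * time.Second}
resp, err := httpClient.Get("https://" + addr)
if err != nil {
	// No common cipher suite: the handshake fails before any HTTP exchange.
	fmt.Printf("handshake rejected: %v\n", err)
} else {
	resp.Body.Close() // overlap existed, so the request went through
}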
6
vendor/k8s.io/kubernetes/test/integration/util/BUILD
generated
vendored
6
vendor/k8s.io/kubernetes/test/integration/util/BUILD
generated
vendored
@ -8,16 +8,22 @@ load(
go_library(
name = "go_default_library",
srcs = [
"cloud.go",
"util.go",
],
importpath = "k8s.io/kubernetes/test/integration/util",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/oauth2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
72
vendor/k8s.io/kubernetes/test/integration/util/cloud.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"time"

	"golang.org/x/oauth2"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

const (
	// TestProjectID is the project ID used for creating NewMockGCECloud
	TestProjectID = "test-project"
	// TestNetworkProjectID is the network project ID for creating NewMockGCECloud
	TestNetworkProjectID = "net-test-project"
	// TestRegion is the region for creating NewMockGCECloud
	TestRegion = "test-region"
	// TestZone is the zone for creating NewMockGCECloud
	TestZone = "test-zone"
	// TestNetworkName is the network name for creating NewMockGCECloud
	TestNetworkName = "test-network"
	// TestSubnetworkName is the subnetwork name for creating NewMockGCECloud
	TestSubnetworkName = "test-sub-network"
	// TestSecondaryRangeName is the secondary range name for creating NewMockGCECloud
	TestSecondaryRangeName = "test-secondary-range"
)

type mockTokenSource struct{}

func (*mockTokenSource) Token() (*oauth2.Token, error) {
	return &oauth2.Token{
		AccessToken:  "access",
		TokenType:    "Bearer",
		RefreshToken: "refresh",
		Expiry:       time.Now().Add(1 * time.Hour),
	}, nil
}

// NewMockGCECloud returns a handle to a GCECloud instance that is
// served by a mock HTTP server.
func NewMockGCECloud(cloud cloud.Cloud) (*gce.GCECloud, error) {
	config := &gce.CloudConfig{
		ProjectID:          TestProjectID,
		NetworkProjectID:   TestNetworkProjectID,
		Region:             TestRegion,
		Zone:               TestZone,
		ManagedZones:       []string{TestZone},
		NetworkName:        TestNetworkName,
		SubnetworkName:     TestSubnetworkName,
		SecondaryRangeName: TestSecondaryRangeName,
		NodeTags:           []string{},
		UseMetadataServer:  false,
		TokenSource:        &mockTokenSource{},
	}
	return gce.CreateGCECloudWithCloud(config, cloud)
}
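A hedged sketch of how a test might consume this helper. NewMockGCE and SingleProjectRouter are assumptions about the gce/cloud mock package (they do not appear in this diff); the rest follows the signatures above:

package util_test

import (
	"testing"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/test/integration/util"
)

func TestNewMockGCECloud(t *testing.T) {
	// Assumed mock constructor: routes all API calls to TestProjectID.
	mock := cloud.NewMockGCE(&cloud.SingleProjectRouter{ID: util.TestProjectID})

	gceCloud, err := util.NewMockGCECloud(mock)
	if err != nil {
		t.Fatalf("NewMockGCECloud: %v", err)
	}
	_ = gceCloud // a real test would exercise the handle here
}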
50
vendor/k8s.io/kubernetes/test/integration/util/util.go
generated
vendored
@ -22,11 +22,13 @@ import (

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
@ -56,29 +58,14 @@ func StartApiserver() (string, ShutdownFunc) {
// StartScheduler configures and starts a scheduler given a handle to the clientSet interface
// and event broadcaster. It returns a handle to the configurator for the running scheduler
// and the shutdown function to stop it.
func StartScheduler(clientSet clientset.Interface, enableEquivalenceCache bool) (scheduler.Configurator, ShutdownFunc) {
func StartScheduler(clientSet clientset.Interface) (scheduler.Configurator, ShutdownFunc) {
	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

	evtBroadcaster := record.NewBroadcaster()
	evtWatch := evtBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
		Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
		Interface: clientSet.CoreV1().Events("")})

	schedulerConfigurator := factory.NewConfigFactory(
		v1.DefaultSchedulerName,
		clientSet,
		informerFactory.Core().V1().Nodes(),
		informerFactory.Core().V1().Pods(),
		informerFactory.Core().V1().PersistentVolumes(),
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		enableEquivalenceCache,
	)
	schedulerConfigurator := createSchedulerConfigurator(clientSet, informerFactory)

	sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
		conf.Recorder = evtBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
@ -101,3 +88,30 @@ func StartScheduler(clientSet clientset.Interface, enableEquivalenceCache bool)
	}
	return schedulerConfigurator, shutdownFunc
}

// createSchedulerConfigurator creates a configurator for the scheduler with the given informer factory and the default name.
func createSchedulerConfigurator(
	clientSet clientset.Interface,
	informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
	// Enable EnableEquivalenceClassCache for all integration tests.
	utilfeature.DefaultFeatureGate.Set("EnableEquivalenceClassCache=true")

	return factory.NewConfigFactory(
		v1.DefaultSchedulerName,
		clientSet,
		informerFactory.Core().V1().Nodes(),
		informerFactory.Core().V1().Pods(),
		informerFactory.Core().V1().PersistentVolumes(),
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
		false,
	)
}
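A hedged sketch of a caller after this refactor: the enableEquivalenceCache parameter is gone, since createSchedulerConfigurator now enables the feature gate itself. The wiring below assumes only the signatures visible in this diff:

package main

import (
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/test/integration/util"
)

func run() {
	// Start a test apiserver and point a client at it.
	url, apiShutdown := util.StartApiserver()
	defer apiShutdown()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: url})

	// One-argument form: no enableEquivalenceCache flag anymore.
	_, schedShutdown := util.StartScheduler(client)
	defer schedShutdown()
}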
2
vendor/k8s.io/kubernetes/test/integration/volume/BUILD
generated
vendored
@ -16,7 +16,6 @@ go_test(
    tags = ["integration"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/cloudprovider/providers/fake:go_default_library",
        "//pkg/controller/volume/attachdetach:go_default_library",
        "//pkg/controller/volume/attachdetach/cache:go_default_library",
@ -30,6 +29,7 @@ go_test(
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
4
vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go
generated
vendored
@ -23,12 +23,12 @@ import (

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/api/testapi"
	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	"k8s.io/kubernetes/pkg/controller/volume/attachdetach"
	volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@ -323,7 +323,7 @@ func waitForPodFuncInDSWP(t *testing.T, dswp volumecache.DesiredStateOfWorld, ch
func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, informers.SharedInformerFactory) {
	config := restclient.Config{
		Host: server.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()},
		ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
		QPS:   1000000,
		Burst: 1000000,
	}
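The recurring edit in these test files swaps the pkg/api/testapi lookup for an explicit schema.GroupVersion. A minimal sketch of the pattern in isolation, assuming only the config fields visible in this hunk (the host value is a placeholder):

package main

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

// newCoreV1Client pins the client to the core/v1 API group directly,
// removing the dependency on pkg/api/testapi.
func newCoreV1Client(host string) *clientset.Clientset {
	return clientset.NewForConfigOrDie(&restclient.Config{
		Host: host,
		// The core group is the empty string; "v1" is its version.
		ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
		// Very high limits so client-side throttling doesn't skew the test.
		QPS:   1000000,
		Burst: 1000000,
	})
}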
7
vendor/k8s.io/kubernetes/test/integration/volume/persistent_volumes_test.go
generated
vendored
@ -35,7 +35,6 @@ import (
	restclient "k8s.io/client-go/rest"
	ref "k8s.io/client-go/tools/reference"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/api/testapi"
	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
	"k8s.io/kubernetes/pkg/volume"
@ -43,6 +42,7 @@ import (
	"k8s.io/kubernetes/test/integration/framework"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Several tests in this file are configurable by environment variables:
@ -1099,13 +1099,13 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
	// creates many objects and default values were too low.
	binderClient := clientset.NewForConfigOrDie(&restclient.Config{
		Host: s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()},
		ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
		QPS:   1000000,
		Burst: 1000000,
	})
	testClient := clientset.NewForConfigOrDie(&restclient.Config{
		Host: s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()},
		ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
		QPS:   1000000,
		Burst: 1000000,
	})
@ -1136,6 +1136,7 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
		ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
		ClassInformer: informers.Storage().V1().StorageClasses(),
		PodInformer:   informers.Core().V1().Pods(),
		NodeInformer:  informers.Core().V1().Nodes(),
		EnableDynamicProvisioning: true,
	})
	if err != nil {
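The last hunk adds a NodeInformer to the persistent-volume controller's parameters. A hedged sketch of that construction in isolation; the ControllerParameters fields beyond the hunk (KubeClient, SyncPeriod, and the exact NewController return type) are assumptions about the elided parts of createClients:

package main

import (
	"time"

	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
)

// newPVController mirrors the hunk above: NodeInformer now sits in
// ControllerParameters alongside the other informers.
func newPVController(client *clientset.Clientset, factory informers.SharedInformerFactory, syncPeriod time.Duration) (*persistentvolumecontroller.PersistentVolumeController, error) {
	return persistentvolumecontroller.NewController(persistentvolumecontroller.ControllerParameters{
		KubeClient:                client,     // assumption: supplied by createClients in the real test
		SyncPeriod:                syncPeriod, // assumption: passed through from the caller
		VolumeInformer:            factory.Core().V1().PersistentVolumes(),
		ClaimInformer:             factory.Core().V1().PersistentVolumeClaims(),
		ClassInformer:             factory.Storage().V1().StorageClasses(),
		PodInformer:               factory.Core().V1().Pods(),
		NodeInformer:              factory.Core().V1().Nodes(), // the field added by this change
		EnableDynamicProvisioning: true,
	})
}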