Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)

Commit: vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/integration/apiserver/BUILD (generated, vendored; 19 lines changed)
@@ -12,30 +12,47 @@ go_test(
        "apiserver_test.go",
        "main_test.go",
        "patch_test.go",
        "print_test.go",
    ],
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubectl/genericclioptions:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/printers:go_default_library",
        "//pkg/printers/internalversion:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
        "//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/handlers:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/features:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/pager:go_default_library",
        "//vendor/k8s.io/gengo/examples/set-gen/sets:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/test/integration/apiserver/apiserver_test.go (generated, vendored; 93 lines changed)
@@ -32,6 +32,7 @@ import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    genericfeatures "k8s.io/apiserver/pkg/features"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
@@ -39,12 +40,17 @@ import (
    "k8s.io/client-go/tools/pager"
    "k8s.io/kubernetes/pkg/api/testapi"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/master"
    "k8s.io/kubernetes/test/integration/framework"
)

func setup(t *testing.T) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.ExtraConfig.EnableCoreControllers = false
    if len(groupVersions) > 0 {
        resourceConfig := master.DefaultAPIResourceConfigSource()
        resourceConfig.EnableVersions(groupVersions...)
        masterConfig.ExtraConfig.APIResourceConfigSource = resourceConfig
    }
    _, s, closeFn := framework.RunAMaster(masterConfig)

    clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
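
Note: the variadic setup(t, groupVersions...) above lets an individual integration test opt into API groups that master.DefaultAPIResourceConfigSource() leaves disabled. A minimal usage sketch follows; the test name is hypothetical and the snippet only assumes the same vendored packages this diff already references.

package apiserver

import (
    "testing"

    settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
)

// TestSetupWithAlphaGroup is a hypothetical example, not part of this commit:
// it asks the test master to also serve settings.k8s.io/v1alpha1, which the
// default API resource config does not enable.
func TestSetupWithAlphaGroup(t *testing.T) {
    s, clientSet, closeFn := setup(t, settingsv1alpha1.SchemeGroupVersion)
    defer closeFn()

    // s is the httptest.Server backing the test master; clientSet talks to it
    // and can now create resources in the extra group, e.g. PodPresets.
    _ = s
    _ = clientSet
}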
@@ -232,3 +238,86 @@ func TestAPIListChunking(t *testing.T) {
        t.Errorf("unexpected items: %#v", list)
    }
}

func makeSecret(name string) *v1.Secret {
    return &v1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Data: map[string][]byte{
            "key": []byte("value"),
        },
    }
}

func TestNameInFieldSelector(t *testing.T) {
    s, clientSet, closeFn := setup(t)
    defer closeFn()

    numNamespaces := 3
    namespaces := make([]*v1.Namespace, 0, numNamespaces)
    for i := 0; i < 3; i++ {
        ns := framework.CreateTestingNamespace(fmt.Sprintf("ns%d", i), s, t)
        defer framework.DeleteTestingNamespace(ns, s, t)
        namespaces = append(namespaces, ns)

        _, err := clientSet.CoreV1().Secrets(ns.Name).Create(makeSecret("foo"))
        if err != nil {
            t.Errorf("Couldn't create secret: %v", err)
        }
        _, err = clientSet.CoreV1().Secrets(ns.Name).Create(makeSecret("bar"))
        if err != nil {
            t.Errorf("Couldn't create secret: %v", err)
        }
    }

    testcases := []struct {
        namespace       string
        selector        string
        expectedSecrets int
    }{
        {
            namespace:       "",
            selector:        "metadata.name=foo",
            expectedSecrets: numNamespaces,
        },
        {
            namespace:       "",
            selector:        "metadata.name=foo,metadata.name=bar",
            expectedSecrets: 0,
        },
        {
            namespace:       "",
            selector:        "metadata.name=foo,metadata.namespace=ns1",
            expectedSecrets: 1,
        },
        {
            namespace:       "ns1",
            selector:        "metadata.name=foo,metadata.namespace=ns1",
            expectedSecrets: 1,
        },
        {
            namespace:       "ns1",
            selector:        "metadata.name=foo,metadata.namespace=ns2",
            expectedSecrets: 0,
        },
        {
            namespace:       "ns1",
            selector:        "metadata.name=foo,metadata.namespace=",
            expectedSecrets: 0,
        },
    }

    for _, tc := range testcases {
        opts := metav1.ListOptions{
            FieldSelector: tc.selector,
        }
        secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(opts)
        if err != nil {
            t.Errorf("%s: Unexpected error: %v", tc.selector, err)
        }
        if len(secrets.Items) != tc.expectedSecrets {
            t.Errorf("%s: Unexpected number of secrets: %d, expected: %d", tc.selector, len(secrets.Items), tc.expectedSecrets)
        }
    }
}
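
Aside: TestNameInFieldSelector drives field selectors as hand-written strings. The same selectors can also be assembled with the vendored apimachinery fields package; the helper below is a hypothetical sketch, not part of this commit.

package apiserver

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
)

// fieldSelectorOptions is a hypothetical helper: it builds the selector string
// used in the test above ("metadata.name=foo,metadata.namespace=ns1") from
// typed selectors instead of a literal string.
func fieldSelectorOptions() metav1.ListOptions {
    selector := fields.AndSelectors(
        fields.OneTermEqualSelector("metadata.name", "foo"),
        fields.OneTermEqualSelector("metadata.namespace", "ns1"),
    )
    // Pass the result to clientSet.CoreV1().Secrets("ns1").List(...) exactly
    // as the test does with its raw selector strings.
    return metav1.ListOptions{FieldSelector: selector.String()}
}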
vendor/k8s.io/kubernetes/test/integration/apiserver/patch_test.go (generated, vendored; 68 lines changed)
@@ -24,18 +24,15 @@ import (

    "github.com/pborman/uuid"

    "reflect"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/endpoints/handlers"
    "k8s.io/kubernetes/test/integration/framework"
)

// Tests that the apiserver retries non-overlapping conflicts on patches
// Tests that the apiserver retries patches
func TestPatchConflicts(t *testing.T) {
    s, clientSet, closeFn := setup(t)
    defer closeFn()
@@ -43,28 +40,41 @@ func TestPatchConflicts(t *testing.T) {
    ns := framework.CreateTestingNamespace("status-code", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    // Create the object we're going to conflict on
    clientSet.CoreV1().Secrets(ns.Name).Create(&v1.Secret{
    numOfConcurrentPatches := 100

    UIDs := make([]types.UID, numOfConcurrentPatches)
    ownerRefs := []metav1.OwnerReference{}
    for i := 0; i < numOfConcurrentPatches; i++ {
        uid := types.UID(uuid.NewRandom().String())
        ownerName := fmt.Sprintf("owner-%d", i)
        UIDs[i] = uid
        ownerRefs = append(ownerRefs, metav1.OwnerReference{
            APIVersion: "example.com/v1",
            Kind:       "Foo",
            Name:       ownerName,
            UID:        uid,
        })
    }
    secret := &v1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test",
            // Populate annotations so the strategic patch descends, compares, and notices the $patch directive
            Annotations: map[string]string{"initial": "value"},
            Name:            "test",
            OwnerReferences: ownerRefs,
        },
    })
    }

    // Create the object we're going to conflict on
    clientSet.CoreV1().Secrets(ns.Name).Create(secret)
    client := clientSet.CoreV1().RESTClient()

    successes := int32(0)

    // Run a lot of simultaneous patch operations to exercise internal API server retry of patch application.
    // Internally, a patch API call retries up to MaxRetryWhenPatchConflicts times if the resource version of the object has changed.
    // If the resource version of the object changed between attempts, that means another one of our patch requests succeeded.
    // That means if we run 2*MaxRetryWhenPatchConflicts patch attempts, we should see at least MaxRetryWhenPatchConflicts succeed.
    // Run a lot of simultaneous patch operations to exercise internal API server retry of application of patches that do not specify resourceVersion.
    // They should all succeed.
    wg := sync.WaitGroup{}
    for i := 0; i < (2 * handlers.MaxRetryWhenPatchConflicts); i++ {
    for i := 0; i < numOfConcurrentPatches; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            annotationName := fmt.Sprintf("annotation-%d", i)
            labelName := fmt.Sprintf("label-%d", i)
            value := uuid.NewRandom().String()

@@ -72,7 +82,7 @@ func TestPatchConflicts(t *testing.T) {
                Namespace(ns.Name).
                Resource("secrets").
                Name("test").
                Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "annotations":{"$patch":"replace","%s":"%s"}}}`, labelName, value, annotationName, value))).
                Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "ownerReferences":[{"$patch":"delete","uid":"%s"}]}}`, labelName, value, UIDs[i]))).
                Do().
                Get()

@@ -95,9 +105,14 @@ func TestPatchConflicts(t *testing.T) {
                t.Errorf("patch of %s was ineffective, expected %s=%s, got labels %#v", "secrets", labelName, value, accessor.GetLabels())
                return
            }
            // make sure the patch directive didn't get lost, and that the entire annotation map was replaced
            if !reflect.DeepEqual(accessor.GetAnnotations(), map[string]string{annotationName: value}) {
                t.Errorf("patch of %s with $patch directive was ineffective, didn't replace entire annotations map: %#v", "secrets", accessor.GetAnnotations())
            // make sure the patch directive didn't get lost, and that an entry in the ownerReference list was deleted.
            found := findOwnerRefByUID(accessor.GetOwnerReferences(), UIDs[i])
            if err != nil {
                t.Errorf("%v", err)
                return
            }
            if found {
                t.Errorf("patch of %s with $patch directive was ineffective, didn't delete the entry in the ownerReference slice: %#v", "secrets", UIDs[i])
            }

            atomic.AddInt32(&successes, 1)
@@ -105,10 +120,19 @@ func TestPatchConflicts(t *testing.T) {
        }
    }
    wg.Wait()

    if successes < handlers.MaxRetryWhenPatchConflicts {
        t.Errorf("Expected at least %d successful patches for %s, got %d", handlers.MaxRetryWhenPatchConflicts, "secrets", successes)
    if successes < int32(numOfConcurrentPatches) {
        t.Errorf("Expected at least %d successful patches for %s, got %d", numOfConcurrentPatches, "secrets", successes)
    } else {
        t.Logf("Got %d successful patches for %s", successes, "secrets")
    }

}

func findOwnerRefByUID(ownerRefs []metav1.OwnerReference, uid types.UID) bool {
    for _, of := range ownerRefs {
        if of.UID == uid {
            return true
        }
    }
    return false
}
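
Aside: the rewritten TestPatchConflicts relies on the strategic-merge-patch "$patch": "delete" directive, which removes the single ownerReferences entry whose uid matches while leaving the rest of the list and the object untouched. The helper below is a hypothetical sketch of the same request made through the vendored typed client (whose Patch method takes no context argument in this client-go version); it is not part of this commit.

package apiserver

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
)

// deleteOwnerRef is a hypothetical helper: it sends the same kind of patch the
// test above builds. With types.StrategicMergePatchType, the "$patch":"delete"
// entry tells the server to drop the ownerReference whose uid matches; a plain
// JSON merge patch would instead replace the whole ownerReferences list.
func deleteOwnerRef(c clientset.Interface, namespace, secretName string, uid types.UID) (*v1.Secret, error) {
    patch := []byte(fmt.Sprintf(
        `{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}]}}`, uid))
    return c.CoreV1().Secrets(namespace).Patch(secretName, types.StrategicMergePatchType, patch)
}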
vendor/k8s.io/kubernetes/test/integration/apiserver/print_test.go (generated, vendored; new file, 304 lines)
@@ -0,0 +1,304 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "reflect"
    "strings"
    "testing"
    "time"

    batchv2alpha1 "k8s.io/api/batch/v2alpha1"
    rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    schedulerapi "k8s.io/api/scheduling/v1beta1"
    settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
    storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/gengo/examples/set-gen/sets"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/kubectl/cmd/util"
    "k8s.io/kubernetes/pkg/kubectl/genericclioptions"
    "k8s.io/kubernetes/pkg/printers"
    printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
    "k8s.io/kubernetes/test/integration/framework"
)

var kindWhiteList = sets.NewString(
    // k8s.io/api/core
    "APIGroup",
    "APIVersions",
    "Binding",
    "DeleteOptions",
    "ExportOptions",
    "GetOptions",
    "ListOptions",
    "NodeProxyOptions",
    "PodAttachOptions",
    "PodExecOptions",
    "PodPortForwardOptions",
    "PodLogOptions",
    "PodProxyOptions",
    "PodStatusResult",
    "RangeAllocation",
    "ServiceProxyOptions",
    "SerializedReference",
    // --

    // k8s.io/api/admission
    "AdmissionReview",
    // --

    // k8s.io/api/admissionregistration
    "InitializerConfiguration",
    // --

    // k8s.io/api/authentication
    "TokenRequest",
    "TokenReview",
    // --

    // k8s.io/api/authorization
    "LocalSubjectAccessReview",
    "SelfSubjectAccessReview",
    "SelfSubjectRulesReview",
    "SubjectAccessReview",
    // --

    // k8s.io/api/autoscaling
    "Scale",
    // --

    // k8s.io/api/apps
    "DeploymentRollback",
    // --

    // k8s.io/api/batch
    "JobTemplate",
    // --

    // k8s.io/api/extensions
    "ReplicationControllerDummy",
    // --

    // k8s.io/api/imagepolicy
    "ImageReview",
    // --

    // k8s.io/api/policy
    "Eviction",
    // --

    // k8s.io/kubernetes/pkg/apis/componentconfig
    "KubeSchedulerConfiguration",
    // --

    // k8s.io/apimachinery/pkg/apis/meta
    "WatchEvent",
    "Status",
    // --
)

// TODO (soltysh): this list has to go down to 0!
var missingHanlders = sets.NewString(
    "ClusterRole",
    "LimitRange",
    "MutatingWebhookConfiguration",
    "ResourceQuota",
    "Role",
    "ValidatingWebhookConfiguration",
    "VolumeAttachment",
    "PriorityClass",
    "PodPreset",
)

func TestServerSidePrint(t *testing.T) {
    s, _, closeFn := setup(t,
        // additional groupversions needed for the test to run
        batchv2alpha1.SchemeGroupVersion,
        rbacv1alpha1.SchemeGroupVersion,
        settingsv1alpha1.SchemeGroupVersion,
        schedulerapi.SchemeGroupVersion,
        storagev1alpha1.SchemeGroupVersion)
    defer closeFn()

    ns := framework.CreateTestingNamespace("server-print", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    tableParam := fmt.Sprintf("application/json;as=Table;g=%s;v=%s, application/json", metav1beta1.GroupName, metav1beta1.SchemeGroupVersion.Version)
    printer := newFakePrinter(printersinternal.AddHandlers)

    configFlags := genericclioptions.NewTestConfigFlags().
        WithClientConfig(clientcmd.NewDefaultClientConfig(*createKubeConfig(s.URL), &clientcmd.ConfigOverrides{}))

    restConfig, err := configFlags.ToRESTConfig()
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    cacheDir, err := ioutil.TempDir(os.TempDir(), "test-integration-apiserver-print")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    defer func() {
        os.Remove(cacheDir)
    }()

    cachedClient, err := discovery.NewCachedDiscoveryClientForConfig(restConfig, cacheDir, "", time.Duration(10*time.Minute))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    configFlags.WithDiscoveryClient(cachedClient)

    factory := util.NewFactory(configFlags)
    mapper, err := factory.ToRESTMapper()
    if err != nil {
        t.Errorf("unexpected error getting mapper: %v", err)
        return
    }
    for gvk, apiType := range legacyscheme.Scheme.AllKnownTypes() {
        // we do not care about internal objects or lists // TODO make sure this is always true
        if gvk.Version == runtime.APIVersionInternal || strings.HasSuffix(apiType.Name(), "List") {
            continue
        }
        if kindWhiteList.Has(gvk.Kind) || missingHanlders.Has(gvk.Kind) {
            continue
        }

        t.Logf("Checking %s", gvk)
        // read table definition as returned by the server
        mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
        if err != nil {
            t.Errorf("unexpected error getting mapping for GVK %s: %v", gvk, err)
            continue
        }
        client, err := factory.ClientForMapping(mapping)
        if err != nil {
            t.Errorf("unexpected error getting client for GVK %s: %v", gvk, err)
            continue
        }
        req := client.Get()
        if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
            req = req.Namespace(ns.Name)
        }
        body, err := req.Resource(mapping.Resource.Resource).SetHeader("Accept", tableParam).Do().Raw()
        if err != nil {
            t.Errorf("unexpected error getting %s: %v", gvk, err)
            continue
        }
        actual, err := decodeIntoTable(body)
        if err != nil {
            t.Errorf("unexpected error decoding %s: %v", gvk, err)
            continue
        }

        // get table definition used in printers
        obj, err := legacyscheme.Scheme.New(gvk)
        if err != nil {
            t.Errorf("unexpected error creating %s: %v", gvk, err)
            continue
        }
        intGV := gvk.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion()
        intObj, err := legacyscheme.Scheme.ConvertToVersion(obj, intGV)
        if err != nil {
            t.Errorf("unexpected error converting %s to internal: %v", gvk, err)
            continue
        }
        expectedColumnDefinitions, ok := printer.handlers[reflect.TypeOf(intObj)]
        if !ok {
            t.Errorf("missing handler for type %v", gvk)
            continue
        }

        for _, e := range expectedColumnDefinitions {
            for _, a := range actual.ColumnDefinitions {
                if a.Name == e.Name && !reflect.DeepEqual(a, e) {
                    t.Errorf("unexpected difference in column definition %s for %s:\nexpected:\n%#v\nactual:\n%#v\n", e.Name, gvk, e, a)
                }
            }
        }
    }
}

type fakePrinter struct {
    handlers map[reflect.Type][]metav1beta1.TableColumnDefinition
}

var _ printers.PrintHandler = &fakePrinter{}

func (f *fakePrinter) Handler(columns, columnsWithWide []string, printFunc interface{}) error {
    return nil
}

func (f *fakePrinter) TableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error {
    printFuncValue := reflect.ValueOf(printFunc)
    objType := printFuncValue.Type().In(0)
    f.handlers[objType] = columns
    return nil
}

func (f *fakePrinter) DefaultTableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error {
    return nil
}

func newFakePrinter(fns ...func(printers.PrintHandler)) *fakePrinter {
    handlers := make(map[reflect.Type][]metav1beta1.TableColumnDefinition, len(fns))
    p := &fakePrinter{handlers: handlers}
    for _, fn := range fns {
        fn(p)
    }
    return p
}

func decodeIntoTable(body []byte) (*metav1beta1.Table, error) {
    table := &metav1beta1.Table{}
    err := json.Unmarshal(body, table)
    if err != nil {
        return nil, err
    }
    return table, nil
}

func createKubeConfig(url string) *clientcmdapi.Config {
    clusterNick := "cluster"
    userNick := "user"
    contextNick := "context"

    config := clientcmdapi.NewConfig()

    cluster := clientcmdapi.NewCluster()
    cluster.Server = url
    cluster.InsecureSkipTLSVerify = true
    config.Clusters[clusterNick] = cluster

    context := clientcmdapi.NewContext()
    context.Cluster = clusterNick
    context.AuthInfo = userNick
    config.Contexts[contextNick] = context
    config.CurrentContext = contextNick

    return config
}
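
Aside: TestServerSidePrint exercises server-side printing, where the Accept header asks the API server to render any resource as a meta.k8s.io/v1beta1 Table (column definitions plus rows) instead of returning full objects. The helper below is a hypothetical sketch of the same request against a single resource using a typed clientset's REST client; it is not part of this commit.

package apiserver

import (
    "encoding/json"
    "fmt"

    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    clientset "k8s.io/client-go/kubernetes"
)

// listSecretsAsTable is a hypothetical helper: it asks the server to return
// the secrets in a namespace already rendered as a Table, the same mechanism
// TestServerSidePrint verifies for every printable kind.
func listSecretsAsTable(c clientset.Interface, namespace string) (*metav1beta1.Table, error) {
    accept := fmt.Sprintf("application/json;as=Table;g=%s;v=%s, application/json",
        metav1beta1.GroupName, metav1beta1.SchemeGroupVersion.Version)

    body, err := c.CoreV1().RESTClient().Get().
        Namespace(namespace).
        Resource("secrets").
        SetHeader("Accept", accept).
        Do().
        Raw()
    if err != nil {
        return nil, err
    }

    table := &metav1beta1.Table{}
    if err := json.Unmarshal(body, table); err != nil {
        return nil, err
    }
    return table, nil
}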