Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor updates
vendor/k8s.io/kubernetes/test/integration/BUILD (generated, vendored; 6 changes)

@@ -13,9 +13,12 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/test/integration",
    deps = [
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
    ],
@@ -34,6 +37,7 @@ filegroup(
        ":package-srcs",
        "//test/integration/apiserver:all-srcs",
        "//test/integration/auth:all-srcs",
+       "//test/integration/benchmark/jsonify:all-srcs",
        "//test/integration/client:all-srcs",
        "//test/integration/configmap:all-srcs",
        "//test/integration/daemonset:all-srcs",
@@ -58,7 +62,9 @@ filegroup(
        "//test/integration/secrets:all-srcs",
        "//test/integration/serviceaccount:all-srcs",
        "//test/integration/storageclasses:all-srcs",
        "//test/integration/tls:all-srcs",
        "//test/integration/ttlcontroller:all-srcs",
        "//test/integration/util:all-srcs",
        "//test/integration/volume:all-srcs",
    ],
    tags = ["automanaged"],
vendor/k8s.io/kubernetes/test/integration/apiserver/BUILD (generated, vendored; 1 change)

@@ -13,7 +13,6 @@ go_test(
        "main_test.go",
        "patch_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/apiserver",
    tags = [
        "etcd",
        "integration",
vendor/k8s.io/kubernetes/test/integration/apiserver/apiserver_test.go (generated, vendored; 4 changes)

@@ -123,7 +123,7 @@ func Test202StatusCode(t *testing.T) {
    ns := framework.CreateTestingNamespace("status-code", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
+   rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name)

    // 1. Create the resource without any finalizer and then delete it without setting DeleteOptions.
    // Verify that server returns 200 in this case.
@@ -173,7 +173,7 @@ func TestAPIListChunking(t *testing.T) {
    ns := framework.CreateTestingNamespace("list-paging", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

-   rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
+   rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name)

    for i := 0; i < 4; i++ {
        rs := newRS(ns.Name)
vendor/k8s.io/kubernetes/test/integration/auth/BUILD (generated, vendored; 13 changes)

@@ -15,8 +15,8 @@ go_test(
        "main_test.go",
        "node_test.go",
        "rbac_test.go",
+       "svcaccttoken_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/auth",
    tags = ["integration"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
@@ -29,9 +29,9 @@ go_test(
        "//pkg/apis/rbac:go_default_library",
        "//pkg/auth/authorizer/abac:go_default_library",
        "//pkg/auth/nodeidentifier:go_default_library",
-       "//pkg/bootstrap/api:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller/serviceaccount:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/authorizer:go_default_library",
        "//pkg/master:go_default_library",
@@ -43,6 +43,7 @@ go_test(
        "//pkg/registry/rbac/role/storage:go_default_library",
        "//pkg/registry/rbac/rolebinding:go_default_library",
        "//pkg/registry/rbac/rolebinding/storage:go_default_library",
+       "//pkg/serviceaccount:go_default_library",
        "//plugin/pkg/admission/admit:go_default_library",
        "//plugin/pkg/admission/noderestriction:go_default_library",
        "//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
@@ -51,13 +52,17 @@ go_test(
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/k8s.io/api/authentication/v1:go_default_library",
        "//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
+       "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+       "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
@@ -72,9 +77,13 @@ go_test(
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library",
+       "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
+       "//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library",
        "//vendor/k8s.io/client-go/transport:go_default_library",
+       "//vendor/k8s.io/client-go/util/cert:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go (generated, vendored; 12 changes)

@@ -55,8 +55,8 @@ func alwaysAlice(req *http.Request) (user.Info, bool, error) {

func TestSubjectAccessReview(t *testing.T) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
-   masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(alwaysAlice)
+   masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
    masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -147,10 +147,10 @@ func TestSubjectAccessReview(t *testing.T) {
func TestSelfSubjectAccessReview(t *testing.T) {
    username := "alice"
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
+   masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
        return &user.DefaultInfo{Name: username}, true, nil
    })
-   masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
+   masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
    masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -229,8 +229,8 @@ func TestSelfSubjectAccessReview(t *testing.T) {

func TestLocalSubjectAccessReview(t *testing.T) {
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
-   masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(alwaysAlice)
+   masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
    masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
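These hunks track an upstream apiserver refactor: the flat GenericConfig.Authenticator and GenericConfig.Authorizer fields moved under nested Authentication and Authorization structs. A hedged sketch of the new wiring, reusing only names visible in this diff (field paths are assumptions taken from the hunks above):

package sketch

import (
    "k8s.io/apiserver/pkg/authorization/authorizerfactory"
    "k8s.io/kubernetes/pkg/master"
    "k8s.io/kubernetes/test/integration/framework"
)

// newDenyAllMasterConfig wires authorization through the new nested struct.
// Before this change the assignment target was the flat field
// GenericConfig.Authorizer (and GenericConfig.Authenticator for authn).
func newDenyAllMasterConfig() *master.Config {
    cfg := framework.NewIntegrationTestMasterConfig()
    cfg.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer()
    return cfg
}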
vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go (generated, vendored; 46 changes)

@@ -100,6 +100,10 @@ func pathWithPrefix(prefix, resource, namespace, name string) string {
    return testapi.Default.ResourcePathWithPrefix(prefix, resource, namespace, name)
}

+ func pathWithSubResource(resource, namespace, name, subresource string) string {
+     return testapi.Default.SubResourcePath(resource, namespace, name, subresource)
+ }
+
func timeoutPath(resource, namespace, name string) string {
    return addTimeoutFlag(testapi.Default.ResourcePath(resource, namespace, name))
}
@@ -326,7 +330,7 @@ func getTestRequests(namespace string) []struct {
    // whenever a service is created, but this test does not run that controller)
    {"POST", timeoutPath("endpoints", namespace, ""), emptyEndpoints, integration.Code201},
    // Should return service unavailable when endpoint.subset is empty.
-   {"GET", pathWithPrefix("proxy", "services", namespace, "a") + "/", "", integration.Code503},
+   {"GET", pathWithSubResource("services", namespace, "a", "proxy") + "/", "", integration.Code503},
    {"PUT", timeoutPath("services", namespace, "a"), aService, integration.Code200},
    {"GET", path("services", namespace, "a"), "", integration.Code200},
    {"DELETE", timeoutPath("endpoints", namespace, "a"), "", integration.Code200},
@@ -379,7 +383,7 @@ func getTestRequests(namespace string) []struct {
    {"DELETE", timeoutPath("foo", namespace, ""), "", integration.Code404},

    // Special verbs on nodes
-   {"GET", pathWithPrefix("proxy", "nodes", namespace, "a"), "", integration.Code404},
+   {"GET", pathWithSubResource("nodes", namespace, "a", "proxy"), "", integration.Code404},
    {"GET", pathWithPrefix("redirect", "nodes", namespace, "a"), "", integration.Code404},
    // TODO: test .../watch/..., which doesn't end before the test timeout.
    // TODO: figure out how to create a node so that it can successfully proxy/redirect.
@@ -500,7 +504,7 @@ func getPreviousResourceVersionKey(url, id string) string {
func TestAuthModeAlwaysDeny(t *testing.T) {
    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer()
+   masterConfig.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -549,8 +553,8 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{}
    masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -619,8 +623,8 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) {
func TestBobIsForbidden(t *testing.T) {
    // This file has alice and bob in it.
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{}
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -663,8 +667,8 @@ func TestUnknownUserIsUnauthorized(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{}
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -725,8 +729,8 @@ func (impersonateAuthorizer) Authorize(a authorizer.Attributes) (authorizer.Decision, string, error) {
func TestImpersonateIsForbidden(t *testing.T) {
    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = impersonateAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = impersonateAuthorizer{}
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -872,8 +876,8 @@ func TestAuthorizationAttributeDetermination(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = trackingAuthorizer
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = trackingAuthorizer
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -938,8 +942,8 @@ func TestNamespaceAuthorization(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = a
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = a
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -1036,8 +1040,8 @@ func TestKindAuthorization(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = a
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = a
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -1120,8 +1124,8 @@ func TestReadOnlyAuthorization(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = getTestTokenAuth()
-   masterConfig.GenericConfig.Authorizer = a
+   masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth()
+   masterConfig.GenericConfig.Authorization.Authorizer = a
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
@@ -1179,8 +1183,8 @@ func TestWebhookTokenAuthenticator(t *testing.T) {

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = authenticator
-   masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{}
+   masterConfig.GenericConfig.Authentication.Authenticator = authenticator
+   masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{}
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
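The pathWithPrefix("proxy", ...) → pathWithSubResource(..., "proxy") swap moves these requests from the deprecated proxy URL prefix to the proxy subresource. A small sketch of the two URL shapes this produces (the paths are written out by hand here; the tests build them through testapi):

package main

import "fmt"

func main() {
    // Deprecated prefixed form, roughly what pathWithPrefix("proxy", "services", "ns", "a") yields:
    oldPath := "/api/v1/proxy/namespaces/ns/services/a"
    // Subresource form, roughly what pathWithSubResource("services", "ns", "a", "proxy") yields:
    newPath := "/api/v1/namespaces/ns/services/a/proxy"
    fmt.Printf("%s -> %s\n", oldPath, newPath)
}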
vendor/k8s.io/kubernetes/test/integration/auth/bootstraptoken_test.go (generated, vendored; 4 changes)

@@ -27,8 +27,8 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
+   bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
    api "k8s.io/kubernetes/pkg/apis/core"
-   bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
    "k8s.io/kubernetes/plugin/pkg/admission/admit"
    "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
    bootstraputil "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
@@ -125,7 +125,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
    authenticator := bearertoken.New(bootstrap.NewTokenAuthenticator(bootstrapSecrets{test.secret}))
    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = authenticator
+   masterConfig.GenericConfig.Authentication.Authenticator = authenticator
    masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()
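The bootstrap token API package moved out of the main kubernetes tree into client-go; because the import alias bootstrapapi is kept, call sites need no change. A minimal sketch (the constant printed is one I believe that client-go package defines; treat the exact name as an assumption of this sketch):

package main

import (
    "fmt"

    // Before: bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
    bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
)

func main() {
    // Call sites stay untouched because the alias is unchanged.
    fmt.Println(bootstrapapi.SecretTypeBootstrapToken) // assumed constant name
}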
vendor/k8s.io/kubernetes/test/integration/auth/node_test.go (generated, vendored; 335 changes)

@@ -20,20 +20,22 @@ import (
    "fmt"
    "net/http"
    "net/http/httptest"
-   "path/filepath"
-   "runtime"
    "testing"
    "time"

+   storagev1beta1 "k8s.io/api/storage/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
+   "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    "k8s.io/apiserver/pkg/authentication/token/tokenfile"
    "k8s.io/apiserver/pkg/authentication/user"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+   versionedinformers "k8s.io/client-go/informers"
+   externalclientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    api "k8s.io/kubernetes/pkg/apis/core"
@@ -72,13 +74,18 @@ func TestNodeAuthorizer(t *testing.T) {

    // Build client config, clientset, and informers
    clientConfig := &restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
-   superuserClient := clientsetForToken(tokenMaster, clientConfig)
+   superuserClient, superuserClientExternal := clientsetForToken(tokenMaster, clientConfig)
    informerFactory := informers.NewSharedInformerFactory(superuserClient, time.Minute)
+   versionedInformerFactory := versionedinformers.NewSharedInformerFactory(superuserClientExternal, time.Minute)

+   // Enabled CSIPersistentVolume feature at startup so volumeattachments get watched
+   defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)()
+
    // Set up Node+RBAC authorizer
    authorizerConfig := &authorizer.AuthorizationConfig{
-       AuthorizationModes: []string{"Node", "RBAC"},
-       InformerFactory:    informerFactory,
+       AuthorizationModes:       []string{"Node", "RBAC"},
+       InformerFactory:          informerFactory,
+       VersionedInformerFactory: versionedInformerFactory,
    }
    nodeRBACAuthorizer, _, err := authorizerConfig.New()
    if err != nil {
@@ -94,10 +101,10 @@ func TestNodeAuthorizer(t *testing.T) {

    // Start the server
    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authenticator = authenticator
-
-   masterConfig.GenericConfig.Authorizer = nodeRBACAuthorizer
+   masterConfig.GenericConfig.Authentication.Authenticator = authenticator
+   masterConfig.GenericConfig.Authorization.Authorizer = nodeRBACAuthorizer
    masterConfig.GenericConfig.AdmissionControl = nodeRestrictionAdmission

    _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, apiServer, h)
    defer closeFn()
@@ -105,6 +112,7 @@ func TestNodeAuthorizer(t *testing.T) {
    stopCh := make(chan struct{})
    defer close(stopCh)
    informerFactory.Start(stopCh)
+   versionedInformerFactory.Start(stopCh)

    // Wait for a healthy server
    for {
@@ -127,6 +135,17 @@ func TestNodeAuthorizer(t *testing.T) {
    if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil {
        t.Fatal(err)
    }
+   pvName := "mypv"
+   if _, err := superuserClientExternal.StorageV1beta1().VolumeAttachments().Create(&storagev1beta1.VolumeAttachment{
+       ObjectMeta: metav1.ObjectMeta{Name: "myattachment"},
+       Spec: storagev1beta1.VolumeAttachmentSpec{
+           Attacher: "foo",
+           Source:   storagev1beta1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
+           NodeName: "node2",
+       },
+   }); err != nil {
+       t.Fatal(err)
+   }
    if _, err := superuserClient.Core().PersistentVolumeClaims("ns").Create(&api.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "mypvc"},
        Spec: api.PersistentVolumeClaimSpec{
@@ -149,130 +168,170 @@ func TestNodeAuthorizer(t *testing.T) {
        t.Fatal(err)
    }

-   getSecret := func(client clientset.Interface) error {
-       _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
-       return err
+   getSecret := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
+           return err
+       }
    }
-   getPVSecret := func(client clientset.Interface) error {
-       _, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{})
-       return err
+   getPVSecret := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{})
+           return err
+       }
    }
-   getConfigMap := func(client clientset.Interface) error {
-       _, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{})
-       return err
+   getConfigMap := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{})
+           return err
+       }
    }
-   getPVC := func(client clientset.Interface) error {
-       _, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
-       return err
+   getPVC := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
+           return err
+       }
    }
-   getPV := func(client clientset.Interface) error {
-       _, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{})
-       return err
+   getPV := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{})
+           return err
+       }
    }
+   getVolumeAttachment := func(client externalclientset.Interface) func() error {
+       return func() error {
+           _, err := client.StorageV1beta1().VolumeAttachments().Get("myattachment", metav1.GetOptions{})
+           return err
+       }
+   }

-   createNode2NormalPod := func(client clientset.Interface) error {
-       _, err := client.Core().Pods("ns").Create(&api.Pod{
-           ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
-           Spec: api.PodSpec{
-               NodeName:   "node2",
-               Containers: []api.Container{{Name: "image", Image: "busybox"}},
-               Volumes: []api.Volume{
-                   {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}},
-                   {Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}},
-                   {Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}},
-               },
-           },
-       })
-       return err
+   createNode2NormalPod := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().Pods("ns").Create(&api.Pod{
+               ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
+               Spec: api.PodSpec{
+                   NodeName:   "node2",
+                   Containers: []api.Container{{Name: "image", Image: "busybox"}},
+                   Volumes: []api.Volume{
+                       {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}},
+                       {Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}},
+                       {Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}},
+                   },
+               },
+           })
+           return err
+       }
    }
-   updateNode2NormalPodStatus := func(client clientset.Interface) error {
-       startTime := metav1.NewTime(time.Now())
-       _, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{
-           ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
-           Status:     api.PodStatus{StartTime: &startTime},
-       })
-       return err
+   updateNode2NormalPodStatus := func(client clientset.Interface) func() error {
+       return func() error {
+           startTime := metav1.NewTime(time.Now())
+           _, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{
+               ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
+               Status:     api.PodStatus{StartTime: &startTime},
+           })
+           return err
+       }
    }
-   deleteNode2NormalPod := func(client clientset.Interface) error {
-       zero := int64(0)
-       return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
+   deleteNode2NormalPod := func(client clientset.Interface) func() error {
+       return func() error {
+           zero := int64(0)
+           return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
+       }
    }

-   createNode2MirrorPod := func(client clientset.Interface) error {
-       _, err := client.Core().Pods("ns").Create(&api.Pod{
-           ObjectMeta: metav1.ObjectMeta{
-               Name:        "node2mirrorpod",
-               Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"},
-           },
-           Spec: api.PodSpec{
-               NodeName:   "node2",
-               Containers: []api.Container{{Name: "image", Image: "busybox"}},
-           },
-       })
-       return err
+   createNode2MirrorPod := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().Pods("ns").Create(&api.Pod{
+               ObjectMeta: metav1.ObjectMeta{
+                   Name:        "node2mirrorpod",
+                   Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"},
+               },
+               Spec: api.PodSpec{
+                   NodeName:   "node2",
+                   Containers: []api.Container{{Name: "image", Image: "busybox"}},
+               },
+           })
+           return err
+       }
    }
-   deleteNode2MirrorPod := func(client clientset.Interface) error {
-       zero := int64(0)
-       return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
+   deleteNode2MirrorPod := func(client clientset.Interface) func() error {
+       return func() error {
+           zero := int64(0)
+           return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
+       }
    }

-   createNode2 := func(client clientset.Interface) error {
-       _, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}})
-       return err
+   createNode2 := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}})
+           return err
+       }
    }
-   updateNode2Status := func(client clientset.Interface) error {
-       _, err := client.Core().Nodes().UpdateStatus(&api.Node{
-           ObjectMeta: metav1.ObjectMeta{Name: "node2"},
-           Status:     api.NodeStatus{},
-       })
-       return err
+   updateNode2Status := func(client clientset.Interface) func() error {
+       return func() error {
+           _, err := client.Core().Nodes().UpdateStatus(&api.Node{
+               ObjectMeta: metav1.ObjectMeta{Name: "node2"},
+               Status:     api.NodeStatus{},
+           })
+           return err
+       }
    }
-   deleteNode2 := func(client clientset.Interface) error {
-       return client.Core().Nodes().Delete("node2", nil)
+   deleteNode2 := func(client clientset.Interface) func() error {
+       return func() error {
+           return client.Core().Nodes().Delete("node2", nil)
+       }
    }
-   createNode2NormalPodEviction := func(client clientset.Interface) error {
-       return client.Policy().Evictions("ns").Evict(&policy.Eviction{
-           TypeMeta: metav1.TypeMeta{
-               APIVersion: "policy/v1beta1",
-               Kind:       "Eviction",
-           },
-           ObjectMeta: metav1.ObjectMeta{
-               Name:      "node2normalpod",
-               Namespace: "ns",
-           },
-       })
+   createNode2NormalPodEviction := func(client clientset.Interface) func() error {
+       return func() error {
+           return client.Policy().Evictions("ns").Evict(&policy.Eviction{
+               TypeMeta: metav1.TypeMeta{
+                   APIVersion: "policy/v1beta1",
+                   Kind:       "Eviction",
+               },
+               ObjectMeta: metav1.ObjectMeta{
+                   Name:      "node2normalpod",
+                   Namespace: "ns",
+               },
+           })
+       }
    }
-   createNode2MirrorPodEviction := func(client clientset.Interface) error {
-       return client.Policy().Evictions("ns").Evict(&policy.Eviction{
-           TypeMeta: metav1.TypeMeta{
-               APIVersion: "policy/v1beta1",
-               Kind:       "Eviction",
-           },
-           ObjectMeta: metav1.ObjectMeta{
-               Name:      "node2mirrorpod",
-               Namespace: "ns",
-           },
-       })
+   createNode2MirrorPodEviction := func(client clientset.Interface) func() error {
+       return func() error {
+           return client.Policy().Evictions("ns").Evict(&policy.Eviction{
+               TypeMeta: metav1.TypeMeta{
+                   APIVersion: "policy/v1beta1",
+                   Kind:       "Eviction",
+               },
+               ObjectMeta: metav1.ObjectMeta{
+                   Name:      "node2mirrorpod",
+                   Namespace: "ns",
+               },
+           })
+       }
    }

    capacity := 50
-   updatePVCCapacity := func(client clientset.Interface) error {
-       capacity++
-       statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity)
-       patchBytes := []byte(statusString)
-       _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
-       return err
+   updatePVCCapacity := func(client clientset.Interface) func() error {
+       return func() error {
+           capacity++
+           statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity)
+           patchBytes := []byte(statusString)
+           _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
+           return err
+       }
    }

-   updatePVCPhase := func(client clientset.Interface) error {
-       patchBytes := []byte(`{"status":{"phase": "Bound"}}`)
-       _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
-       return err
+   updatePVCPhase := func(client clientset.Interface) func() error {
+       return func() error {
+           patchBytes := []byte(`{"status":{"phase": "Bound"}}`)
+           _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
+           return err
+       }
    }

-   nodeanonClient := clientsetForToken(tokenNodeUnknown, clientConfig)
-   node1Client := clientsetForToken(tokenNode1, clientConfig)
-   node2Client := clientsetForToken(tokenNode2, clientConfig)
+   nodeanonClient, _ := clientsetForToken(tokenNodeUnknown, clientConfig)
+   node1Client, node1ClientExternal := clientsetForToken(tokenNode1, clientConfig)
+   node2Client, node2ClientExternal := clientsetForToken(tokenNode2, clientConfig)

    // all node requests from node1 and unknown node fail
    expectForbidden(t, getSecret(nodeanonClient))
@@ -369,40 +428,64 @@ func TestNodeAuthorizer(t *testing.T) {

    // re-create a pod as an admin to add object references
    expectAllowed(t, createNode2NormalPod(superuserClient))
-   // With ExpandPersistentVolumes feature disabled
+
+   // ExpandPersistentVolumes feature disabled
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandPersistentVolumes, false)()
    // node->pvc relationship not established
    expectForbidden(t, updatePVCCapacity(node1Client))
    // node->pvc relationship established but feature is disabled
    expectForbidden(t, updatePVCCapacity(node2Client))

-   //Enabled ExpandPersistentVolumes feature
+   // ExpandPersistentVolumes feature enabled
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandPersistentVolumes, true)()
    // Node->pvc relationship not established
    expectForbidden(t, updatePVCCapacity(node1Client))
    // node->pvc relationship established and feature is enabled
    expectAllowed(t, updatePVCCapacity(node2Client))
    // node->pvc relationship established but updating phase
    expectForbidden(t, updatePVCPhase(node2Client))

+   // Disabled CSIPersistentVolume feature
+   defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, false)()
+   expectForbidden(t, getVolumeAttachment(node1ClientExternal))
+   expectForbidden(t, getVolumeAttachment(node2ClientExternal))
+   // Enabled CSIPersistentVolume feature
+   defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)()
+   expectForbidden(t, getVolumeAttachment(node1ClientExternal))
+   expectAllowed(t, getVolumeAttachment(node2ClientExternal))

    //TODO(mikedanese): integration test node restriction of TokenRequest
}

-func expectForbidden(t *testing.T, err error) {
-   if !errors.IsForbidden(err) {
-       _, file, line, _ := runtime.Caller(1)
-       t.Errorf("%s:%d: Expected forbidden error, got %v", filepath.Base(file), line, err)
+// expect executes a function repeatedly until it either returns the expected
+// error or has executed too many times. It returns whether the retries timed
+// out, and the last error returned by the function.
+func expect(t *testing.T, f func() error, wantErr func(error) bool) (timeout bool, lastErr error) {
+   t.Helper()
+   err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
+       t.Helper()
+       lastErr = f()
+       if wantErr(lastErr) {
+           return true, nil
+       }
+       t.Logf("unexpected response, will retry: %v", lastErr)
+       return false, nil
+   })
+   return err == nil, lastErr
+}
+
+func expectForbidden(t *testing.T, f func() error) {
+   t.Helper()
+   if ok, err := expect(t, f, errors.IsForbidden); !ok {
+       t.Errorf("Expected forbidden error, got %v", err)
    }
}

-func expectNotFound(t *testing.T, err error) {
-   if !errors.IsNotFound(err) {
-       _, file, line, _ := runtime.Caller(1)
-       t.Errorf("%s:%d: Expected notfound error, got %v", filepath.Base(file), line, err)
+func expectNotFound(t *testing.T, f func() error) {
+   t.Helper()
+   if ok, err := expect(t, f, errors.IsNotFound); !ok {
+       t.Errorf("Expected notfound error, got %v", err)
    }
}

-func expectAllowed(t *testing.T, err error) {
-   if err != nil {
-       _, file, line, _ := runtime.Caller(1)
-       t.Errorf("%s:%d: Expected no error, got %v", filepath.Base(file), line, err)
+func expectAllowed(t *testing.T, f func() error) {
+   t.Helper()
+   if ok, err := expect(t, f, func(e error) bool { return e == nil }); !ok {
+       t.Errorf("Expected no error, got %v", err)
    }
}
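The large rewrite above changes every request helper from func(client) error to a factory returning func() error, so the new expect helper can re-invoke the same request while wait.PollImmediate waits for the node authorizer's informers to catch up. A self-contained sketch of that retry pattern (the makeGet helper, its failure count, and the timings are illustrative):

package main

import (
    "errors"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// makeGet returns a re-invokable request, mirroring the new shape of getSecret
// and friends: building the closure once, calling it many times.
func makeGet(failures int) func() error {
    n := 0
    return func() error {
        n++
        if n <= failures {
            // Illustrative stand-in for an error that apierrors.IsForbidden would match.
            return errors.New("forbidden")
        }
        return nil
    }
}

func main() {
    f := makeGet(2) // fails twice, succeeds on the third call
    err := wait.PollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
        return f() == nil, nil // retry until the request is allowed
    })
    fmt.Println("timed out:", err != nil)
}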
vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go (generated, vendored; 16 changes)

@@ -36,6 +36,7 @@ import (
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/apiserver/pkg/registry/generic"
+   externalclientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/transport"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -65,10 +66,10 @@ func clientForToken(user string) *http.Client {
    }
}

-func clientsetForToken(user string, config *restclient.Config) clientset.Interface {
+func clientsetForToken(user string, config *restclient.Config) (clientset.Interface, externalclientset.Interface) {
    configCopy := *config
    configCopy.BearerToken = user
-   return clientset.NewForConfigOrDie(&configCopy)
+   return clientset.NewForConfigOrDie(&configCopy), externalclientset.NewForConfigOrDie(&configCopy)
}

type testRESTOptionsGetter struct {
@@ -413,8 +414,8 @@ func TestRBAC(t *testing.T) {
    for i, tc := range tests {
        // Create an API Server.
        masterConfig := framework.NewIntegrationTestMasterConfig()
-       masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig)
-       masterConfig.GenericConfig.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
+       masterConfig.GenericConfig.Authorization.Authorizer = newRBACAuthorizer(masterConfig)
+       masterConfig.GenericConfig.Authentication.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
            superUser: {Name: "admin", Groups: []string{"system:masters"}},
            "any-rolebinding-writer": {Name: "any-rolebinding-writer"},
            "any-rolebinding-writer-namespace": {Name: "any-rolebinding-writer-namespace"},
@@ -431,7 +432,8 @@ func TestRBAC(t *testing.T) {
        clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}

        // Bootstrap the API Server with the test case's initial roles.
-       if err := tc.bootstrapRoles.bootstrap(clientsetForToken(superUser, clientConfig)); err != nil {
+       superuserClient, _ := clientsetForToken(superUser, clientConfig)
+       if err := tc.bootstrapRoles.bootstrap(superuserClient); err != nil {
            t.Errorf("case %d: failed to apply initial roles: %v", i, err)
            continue
        }
@@ -515,8 +517,8 @@ func TestBootstrapping(t *testing.T) {
    superUser := "admin/system:masters"

    masterConfig := framework.NewIntegrationTestMasterConfig()
-   masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig)
-   masterConfig.GenericConfig.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
+   masterConfig.GenericConfig.Authorization.Authorizer = newRBACAuthorizer(masterConfig)
+   masterConfig.GenericConfig.Authentication.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
        superUser: {Name: "admin", Groups: []string{"system:masters"}},
    }))
    _, s, closeFn := framework.RunAMaster(masterConfig)
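clientsetForToken now returns a pair of clientsets built from one copied config: the internal clientset most of these tests use, plus a versioned client-go clientset for the new storage/v1beta1 calls. A sketch of the same pattern using only client-go types (the host and token values are illustrative):

package main

import (
    "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
)

// clientsetsForToken builds two clientsets sharing one rest.Config copy; in
// the test one side is the internal clientset, here both are client-go
// clientsets for simplicity.
func clientsetsForToken(token string, config *restclient.Config) (*kubernetes.Clientset, *kubernetes.Clientset) {
    configCopy := *config // copy so the caller's config keeps its own token
    configCopy.BearerToken = token
    return kubernetes.NewForConfigOrDie(&configCopy), kubernetes.NewForConfigOrDie(&configCopy)
}

func main() {
    cfg := &restclient.Config{Host: "https://127.0.0.1:6443"} // illustrative host
    internal, external := clientsetsForToken("token-node1", cfg)
    _, _ = internal, external
}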
vendor/k8s.io/kubernetes/test/integration/auth/svcaccttoken_test.go (generated, vendored; new file, 462 lines)

@@ -0,0 +1,462 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
    "crypto/ecdsa"
    "encoding/base64"
    "encoding/json"
    "strings"
    "testing"
    "time"

    authenticationv1 "k8s.io/api/authentication/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    "k8s.io/apiserver/pkg/authorization/authorizerfactory"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    clientset "k8s.io/client-go/kubernetes"
    externalclientset "k8s.io/client-go/kubernetes"
    certutil "k8s.io/client-go/util/cert"
    serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/serviceaccount"
    "k8s.io/kubernetes/test/integration/framework"
)

const ecdsaPrivateKey = `-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIEZmTmUhuanLjPA2CLquXivuwBDHTt5XYwgIr/kA1LtRoAoGCCqGSM49
AwEHoUQDQgAEH6cuzP8XuD5wal6wf9M6xDljTOPLX2i8uIp/C/ASqiIGUeeKQtX0
/IR3qCXyThP/dbCiHrF3v1cuhBOHY8CLVg==
-----END EC PRIVATE KEY-----`

func TestServiceAccountTokenCreate(t *testing.T) {
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TokenRequest, true)()

    // Build client config, clientset, and informers
    sk, err := certutil.ParsePrivateKeyPEM([]byte(ecdsaPrivateKey))
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    pk := sk.(*ecdsa.PrivateKey).PublicKey

    const iss = "https://foo.bar.example.com"
    aud := []string{"api"}

    gcs := &clientset.Clientset{}

    // Start the server
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
    masterConfig.GenericConfig.Authentication.Authenticator = bearertoken.New(
        serviceaccount.JWTTokenAuthenticator(
            iss,
            []interface{}{&pk},
            serviceaccount.NewValidator(aud, serviceaccountgetter.NewGetterFromClient(gcs)),
        ),
    )
    masterConfig.ExtraConfig.ServiceAccountIssuer = serviceaccount.JWTTokenGenerator(iss, sk)
    masterConfig.ExtraConfig.ServiceAccountAPIAudiences = aud

    master, _, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()

    cs, err := clientset.NewForConfig(master.GenericAPIServer.LoopbackClientConfig)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    *gcs = *cs

    var (
        sa = &v1.ServiceAccount{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "test-svcacct",
                Namespace: "myns",
            },
        }
        pod = &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "test-pod",
                Namespace: sa.Namespace,
            },
            Spec: v1.PodSpec{
                ServiceAccountName: sa.Name,
                Containers:         []v1.Container{{Name: "test-container", Image: "nginx"}},
            },
        }
        otherpod = &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "other-test-pod",
                Namespace: sa.Namespace,
            },
            Spec: v1.PodSpec{
                ServiceAccountName: "other-" + sa.Name,
                Containers:         []v1.Container{{Name: "test-container", Image: "nginx"}},
            },
        }
        secret = &v1.Secret{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "test-secret",
                Namespace: sa.Namespace,
            },
        }

        one      = int64(1)
        wrongUID = types.UID("wrong")
        noUID    = types.UID("")
    )

    t.Run("bound to service account", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"api"},
                ExpirationSeconds: &one,
            },
        }

        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token for nonexistent svcacct but got: %#v", resp)
        }
        sa, delSvcAcct := createDeleteSvcAcct(t, cs, sa)
        defer delSvcAcct()

        treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
        checkPayload(t, treq.Status.Token, `["api"]`, "aud")
        checkPayload(t, treq.Status.Token, "null", "kubernetes.io", "pod")
        checkPayload(t, treq.Status.Token, "null", "kubernetes.io", "secret")
        checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
        checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

        doTokenReview(t, cs, treq, false)
        delSvcAcct()
        doTokenReview(t, cs, treq, true)
    })

    t.Run("bound to service account and pod", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"api"},
                ExpirationSeconds: &one,
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Pod",
                    APIVersion: "v1",
                    Name:       pod.Name,
                },
            },
        }

        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token for nonexistent svcacct but got: %#v", resp)
        }
        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()

        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token bound to nonexistent pod but got: %#v", resp)
        }
        pod, delPod := createDeletePod(t, cs, pod)
        defer delPod()

        // right uid
        treq.Spec.BoundObjectRef.UID = pod.UID
        if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
            t.Fatalf("err: %v", err)
        }
        // wrong uid
        treq.Spec.BoundObjectRef.UID = wrongUID
        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp)
        }
        // no uid
        treq.Spec.BoundObjectRef.UID = noUID
        treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
        checkPayload(t, treq.Status.Token, `["api"]`, "aud")
        checkPayload(t, treq.Status.Token, `"test-pod"`, "kubernetes.io", "pod", "name")
        checkPayload(t, treq.Status.Token, "null", "kubernetes.io", "secret")
        checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
        checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

        doTokenReview(t, cs, treq, false)
        delPod()
        doTokenReview(t, cs, treq, true)
    })

    t.Run("bound to service account and secret", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"api"},
                ExpirationSeconds: &one,
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Secret",
                    APIVersion: "v1",
                    Name:       secret.Name,
                    UID:        secret.UID,
                },
            },
        }

        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token for nonexistent svcacct but got: %#v", resp)
        }
        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()

        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token bound to nonexistent secret but got: %#v", resp)
        }
        secret, delSecret := createDeleteSecret(t, cs, secret)
        defer delSecret()

        // right uid
        treq.Spec.BoundObjectRef.UID = secret.UID
        if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
            t.Fatalf("err: %v", err)
        }
        // wrong uid
        treq.Spec.BoundObjectRef.UID = wrongUID
        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err creating token bound to secret with wrong uid but got: %#v", resp)
        }
        // no uid
        treq.Spec.BoundObjectRef.UID = noUID
        treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
        checkPayload(t, treq.Status.Token, `["api"]`, "aud")
        checkPayload(t, treq.Status.Token, `null`, "kubernetes.io", "pod")
        checkPayload(t, treq.Status.Token, `"test-secret"`, "kubernetes.io", "secret", "name")
        checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
        checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

        doTokenReview(t, cs, treq, false)
        delSecret()
        doTokenReview(t, cs, treq, true)
    })

    t.Run("bound to service account and pod running as different service account", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"api"},
                ExpirationSeconds: &one,
                BoundObjectRef: &authenticationv1.BoundObjectReference{
                    Kind:       "Pod",
                    APIVersion: "v1",
                    Name:       otherpod.Name,
                },
            },
        }

        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()
        _, del = createDeletePod(t, cs, otherpod)
        defer del()

        if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil {
            t.Fatalf("expected err but got: %#v", resp)
        }
    })

    t.Run("expired token", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"api"},
                ExpirationSeconds: &one,
            },
        }

        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()

        treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        doTokenReview(t, cs, treq, false)
        time.Sleep(63 * time.Second)
        doTokenReview(t, cs, treq, true)
    })

    t.Run("a token without an api audience is invalid", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences: []string{"not-the-api"},
            },
        }

        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()

        treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        doTokenReview(t, cs, treq, true)
    })

    t.Run("a tokenrequest without an audience is valid against the api", func(t *testing.T) {
        treq := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{},
        }

        sa, del := createDeleteSvcAcct(t, cs, sa)
        defer del()

        treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        checkPayload(t, treq.Status.Token, `["api"]`, "aud")

        doTokenReview(t, cs, treq, false)
    })
}

func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) {
    t.Helper()
    trev, err := cs.AuthenticationV1().TokenReviews().Create(&authenticationv1.TokenReview{
        Spec: authenticationv1.TokenReviewSpec{
            Token: treq.Status.Token,
        },
    })
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    t.Logf("status: %+v", trev.Status)
    if (trev.Status.Error != "") && !expectErr {
        t.Fatalf("expected no error but got: %v", trev.Status.Error)
    }
    if (trev.Status.Error == "") && expectErr {
        t.Fatalf("expected error but got: %+v", trev.Status)
    }
    if !trev.Status.Authenticated && !expectErr {
        t.Fatal("expected token to be authenticated but it wasn't")
    }
}

func checkPayload(t *testing.T, tok string, want string, parts ...string) {
    t.Helper()
    got := getSubObject(t, getPayload(t, tok), parts...)
    if got != want {
        t.Errorf("unexpected payload.\nsaw:\t%v\nwant:\t%v", got, want)
    }
}

func getSubObject(t *testing.T, b string, parts ...string) string {
    t.Helper()
    var obj interface{}
    obj = make(map[string]interface{})
    if err := json.Unmarshal([]byte(b), &obj); err != nil {
        t.Fatalf("err: %v", err)
    }
    for _, part := range parts {
        obj = obj.(map[string]interface{})[part]
    }
    out, err := json.Marshal(obj)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    return string(out)
}

func getPayload(t *testing.T, b string) string {
    t.Helper()
    parts := strings.Split(b, ".")
    if len(parts) != 3 {
        t.Fatalf("token did not have three parts: %v", b)
    }
    payload, err := base64.RawURLEncoding.DecodeString(parts[1])
    if err != nil {
        t.Fatalf("failed to base64 decode token: %v", err)
    }
    return string(payload)
}

func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAccount) (*v1.ServiceAccount, func()) {
    t.Helper()
    sa, err := cs.CoreV1().ServiceAccounts(sa.Namespace).Create(sa)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    done := false
    return sa, func() {
        t.Helper()
        if done {
            return
        }
        done = true
        if err := cs.CoreV1().ServiceAccounts(sa.Namespace).Delete(sa.Name, nil); err != nil {
            t.Fatalf("err: %v", err)
        }
    }
}

func createDeletePod(t *testing.T, cs clientset.Interface, pod *v1.Pod) (*v1.Pod, func()) {
    t.Helper()
    pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    done := false
    return pod, func() {
        t.Helper()
        if done {
            return
        }
        done = true
        if err := cs.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
            t.Fatalf("err: %v", err)
        }
    }
}

func createDeleteSecret(t *testing.T, cs clientset.Interface, sec *v1.Secret) (*v1.Secret, func()) {
    t.Helper()
    sec, err := cs.CoreV1().Secrets(sec.Namespace).Create(sec)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    done := false
    return sec, func() {
        t.Helper()
        if done {
            return
        }
        done = true
        if err := cs.CoreV1().Secrets(sec.Namespace).Delete(sec.Name, nil); err != nil {
            t.Fatalf("err: %v", err)
        }
    }
}
2
vendor/k8s.io/kubernetes/test/integration/benchmark-controller.json
generated
vendored
@ -17,7 +17,7 @@
"spec": {
"containers": [{
"name": "test-container",
"image": "gcr.io/google_containers/pause-amd64:3.0"
"image": "k8s.gcr.io/pause-amd64:3.1"
}]
}
}

32
vendor/k8s.io/kubernetes/test/integration/benchmark/jsonify/BUILD
generated
vendored
Normal file
@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
name = "go_default_library",
srcs = ["main.go"],
importpath = "k8s.io/kubernetes/test/integration/benchmark/jsonify",
visibility = ["//visibility:private"],
deps = [
"//test/e2e/perftype:go_default_library",
"//vendor/golang.org/x/tools/benchmark/parse:go_default_library",
],
)

go_binary(
name = "jsonify",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
77
vendor/k8s.io/kubernetes/test/integration/benchmark/jsonify/main.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"

benchparse "golang.org/x/tools/benchmark/parse"
"k8s.io/kubernetes/test/e2e/perftype"
)

func main() {
err := run()
if err != nil {
panic(err)
}
}

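// Usage sketch (not part of the vendored change; the invocation is an
// assumption): run consumes `go test -bench` output on stdin and writes
// indented perf JSON to the file named by the first argument, e.g.
//	go test -bench=. ./test/integration/... | jsonify perf.json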
func run() error {
if len(os.Args) < 2 {
return fmt.Errorf("output filename is a required argument")
}
benchmarkSet, err := benchparse.ParseSet(os.Stdin)
if err != nil {
return err
}
data := perftype.PerfData{Version: "v1"}
for _, benchMarks := range benchmarkSet {
for _, benchMark := range benchMarks {
data.DataItems = appendIfMeasured(data.DataItems, benchMark, benchparse.NsPerOp, "time", "μs", benchMark.NsPerOp/1000.0)
data.DataItems = appendIfMeasured(data.DataItems, benchMark, benchparse.MBPerS, "throughput", "MBps", benchMark.MBPerS)
data.DataItems = appendIfMeasured(data.DataItems, benchMark, benchparse.AllocedBytesPerOp, "allocated", "bytes", float64(benchMark.AllocedBytesPerOp))
data.DataItems = appendIfMeasured(data.DataItems, benchMark, benchparse.AllocsPerOp, "allocations", "1", float64(benchMark.AllocsPerOp))
data.DataItems = appendIfMeasured(data.DataItems, benchMark, 0, "iterations", "1", float64(benchMark.N))
}
}
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(data); err != nil {
return err
}
formatted := &bytes.Buffer{}
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
return err
}
return ioutil.WriteFile(os.Args[1], formatted.Bytes(), 0664)
}

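// appendIfMeasured appends a data item only when the benchmark actually
// recorded the metric: metricType is a benchparse measurement bit tested
// against benchmark.Measured, with 0 meaning "always append" (used for the
// iterations count).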
func appendIfMeasured(items []perftype.DataItem, benchmark *benchparse.Benchmark, metricType int, metricName string, unit string, value float64) []perftype.DataItem {
if metricType != 0 && (benchmark.Measured&metricType) == 0 {
return items
}
return append(items, perftype.DataItem{
Unit: unit,
Labels: map[string]string{
"benchmark": benchmark.Name,
"metricName": metricName},
Data: map[string]float64{
"value": value}})
}
1
vendor/k8s.io/kubernetes/test/integration/client/BUILD
generated
vendored
@ -13,7 +13,6 @@ go_test(
"dynamic_client_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/client",
tags = ["integration"],
deps = [
"//pkg/api/legacyscheme:go_default_library",

2
vendor/k8s.io/kubernetes/test/integration/client/client_test.go
generated
vendored
@ -563,7 +563,7 @@ func TestSingleWatch(t *testing.T) {

func TestMultiWatch(t *testing.T) {
// Disable this test as long as it demonstrates a problem.
// TODO: Reenable this test when we get #6059 resolved.
// TODO: Re-enable this test when we get #6059 resolved.
t.Skip()

const watcherCount = 50

1
vendor/k8s.io/kubernetes/test/integration/configmap/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"configmap_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/configmap",
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",

5
vendor/k8s.io/kubernetes/test/integration/daemonset/BUILD
generated
vendored
@ -12,23 +12,22 @@ go_test(
"daemonset_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/daemonset",
tags = ["integration"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/util/metrics:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],

51
vendor/k8s.io/kubernetes/test/integration/daemonset/daemonset_test.go
generated
vendored
@ -22,16 +22,16 @@ import (
"testing"
"time"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
extensionsv1beta1typed "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@ -53,8 +53,8 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
dc, err := daemon.NewDaemonSetsController(
informers.Extensions().V1beta1().DaemonSets(),
informers.Apps().V1beta1().ControllerRevisions(),
informers.Apps().V1().DaemonSets(),
informers.Apps().V1().ControllerRevisions(),
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
@ -70,22 +70,22 @@ func testLabels() map[string]string {
return map[string]string{"name": "test"}
}

func newDaemonSet(name, namespace string) *v1beta1.DaemonSet {
func newDaemonSet(name, namespace string) *apps.DaemonSet {
two := int32(2)
return &v1beta1.DaemonSet{
return &apps.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: "extensions/v1beta1",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1beta1.DaemonSetSpec{
Spec: apps.DaemonSetSpec{
RevisionHistoryLimit: &two,
Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
Type: v1beta1.OnDeleteDaemonSetStrategyType,
UpdateStrategy: apps.DaemonSetUpdateStrategy{
Type: apps.OnDeleteDaemonSetStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -99,22 +99,22 @@ func newDaemonSet(name, namespace string) *v1beta1.DaemonSet {
}
}

func newRollbackStrategy() *v1beta1.DaemonSetUpdateStrategy {
func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
one := intstr.FromInt(1)
return &v1beta1.DaemonSetUpdateStrategy{
Type: v1beta1.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &v1beta1.RollingUpdateDaemonSet{MaxUnavailable: &one},
return &apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
}
}

func newOnDeleteStrategy() *v1beta1.DaemonSetUpdateStrategy {
return &v1beta1.DaemonSetUpdateStrategy{
Type: v1beta1.OnDeleteDaemonSetStrategyType,
func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
return &apps.DaemonSetUpdateStrategy{
Type: apps.OnDeleteDaemonSetStrategyType,
}
}

func updateStrategies() []*v1beta1.DaemonSetUpdateStrategy {
return []*v1beta1.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
func updateStrategies() []*apps.DaemonSetUpdateStrategy {
return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
}

func allocatableResources(memory, cpu string) v1.ResourceList {
@ -189,9 +189,6 @@ func validateDaemonSetPodsAndMarkReady(
return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
}
controllerRef := ownerReferences[0]
if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want {
t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
}
if got, want := controllerRef.Kind, "DaemonSet"; got != want {
t.Errorf("controllerRef.Kind = %q, want %q", got, want)
}
@ -219,7 +216,7 @@ func validateDaemonSetPodsAndMarkReady(
}

func validateDaemonSetStatus(
dsClient extensionsv1beta1typed.DaemonSetInterface,
dsClient appstyped.DaemonSetInterface,
dsName string,
dsNamespace string,
expectedNumberReady int32,
@ -267,7 +264,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
@ -300,7 +297,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
@ -330,7 +327,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
@ -367,7 +364,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
eventClient := corev1typed.New(clientset.CoreV1().RESTClient()).Events(ns.Namespace)
stopCh := make(chan struct{})

3
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"defaulttolerationseconds_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/defaulttolerationseconds",
tags = [
"etcd",
"integration",
@ -20,8 +19,8 @@ go_test(
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@ -25,8 +25,8 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/test/integration/framework"
)

6
vendor/k8s.io/kubernetes/test/integration/deployment/BUILD
generated
vendored
@ -13,17 +13,19 @@ go_test(
"deployment_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/deployment",
library = ":go_default_library",
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)

432
vendor/k8s.io/kubernetes/test/integration/deployment/deployment_test.go
generated
vendored
@ -26,8 +26,11 @@ import (
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/integration/framework"
)

@ -775,7 +778,7 @@ func TestFailedDeployment(t *testing.T) {
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

if err = tester.waitForDeploymentUpdatedReplicasLTE(replicas); err != nil {
if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
t.Fatal(err)
}

@ -1068,3 +1071,430 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
}
}

func TestSpecReplicasChange(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-spec-replicas-change"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

deploymentName := "deployment"
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
tester.deployment.Spec.Strategy.Type = v1beta1.RecreateDeploymentStrategyType
tester.deployment.Spec.Strategy.RollingUpdate = nil
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}

// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

// Scale the deployment up and down and verify its replicaset has a matching .spec.replicas
if err = tester.scaleDeployment(2); err != nil {
t.Fatal(err)
}
if err = tester.scaleDeployment(0); err != nil {
t.Fatal(err)
}
if err = tester.scaleDeployment(1); err != nil {
t.Fatal(err)
}

// Make a spec change that does not touch .spec.replicas (here: RevisionHistoryLimit)
// to verify the deployment's status still updates
var oldGeneration int64
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
oldGeneration = update.Generation
update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4)
})
if err != nil {
t.Fatalf("failed updating deployment %q: %v", tester.deployment.Name, err)
}

savedGeneration := tester.deployment.Generation
if savedGeneration == oldGeneration {
t.Fatalf("Failed to verify .Generation has incremented for deployment %q", deploymentName)
}
if err = tester.waitForObservedDeployment(savedGeneration); err != nil {
t.Fatal(err)
}
}

func TestDeploymentAvailableCondition(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-deployment-available-condition"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

deploymentName := "deployment"
replicas := int32(10)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
// Assign a high value to the deployment's minReadySeconds
tester.deployment.Spec.MinReadySeconds = 3600
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}

// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

// Wait for the deployment to be observed by the controller and to have at least the specified number of updated replicas
if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
t.Fatal(err)
}

// Wait for the deployment to have the MinimumReplicasUnavailable reason because the pods are not marked as ready
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
t.Fatal(err)
}

// Verify all replicas fields of DeploymentStatus have desired counts
if err = tester.checkDeploymentStatusReplicasFields(10, 10, 0, 0, 10); err != nil {
t.Fatal(err)
}

// Mark the pods as ready without waiting for the deployment to complete
if err = tester.markUpdatedPodsReadyWithoutComplete(); err != nil {
t.Fatal(err)
}

// Wait for the number of ready replicas to equal the number of replicas.
if err = tester.waitForReadyReplicas(); err != nil {
t.Fatal(err)
}

// Wait for the deployment to still have the MinimumReplicasUnavailable reason within the minReadySeconds period
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
t.Fatal(err)
}

// Verify all replicas fields of DeploymentStatus have desired counts
if err = tester.checkDeploymentStatusReplicasFields(10, 10, 10, 0, 10); err != nil {
t.Fatal(err)
}

// Update the deployment's minReadySeconds to a small value
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
update.Spec.MinReadySeconds = 1
})
if err != nil {
t.Fatalf("failed updating deployment %q: %v", deploymentName, err)
}

// Wait for the deployment to notice that minReadySeconds has changed
if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}

// Wait for the deployment to have the MinimumReplicasAvailable reason after the minReadySeconds period
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, v1beta1.DeploymentAvailable); err != nil {
t.Fatal(err)
}

// Verify all replicas fields of DeploymentStatus have desired counts
if err = tester.checkDeploymentStatusReplicasFields(10, 10, 10, 10, 0); err != nil {
t.Fatal(err)
}
}

// testRSControllerRefPatch waits for the deployment to automatically patch an incorrect ControllerRef on the RS
func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *v1beta1.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
ns := rs.Namespace
rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns)
rs, err := tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
update.OwnerReferences = []metav1.OwnerReference{*ownerReference}
})
if err != nil {
t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
}

if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return metav1.GetControllerOf(newRS) != nil, nil
}); err != nil {
t.Fatalf("failed to wait for the replicaset %q to have a non-nil controllerRef: %v", rs.Name, err)
}

newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to obtain replicaset %q: %v", rs.Name, err)
}
controllerRef := metav1.GetControllerOf(newRS)
if controllerRef.UID != tester.deployment.UID {
t.Fatalf("controllerRef of replicaset %q has a different UID: Expected %v, got %v", newRS.Name, tester.deployment.UID, controllerRef.UID)
}
ownerReferenceNum := len(newRS.GetOwnerReferences())
if ownerReferenceNum != expectedOwnerReferenceNum {
t.Fatalf("unexpected number of owner references for replicaset %q: Expected %d, got %d", newRS.Name, expectedOwnerReferenceNum, ownerReferenceNum)
}
}

func TestGeneralReplicaSetAdoption(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-general-replicaset-adoption"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

deploymentName := "deployment"
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}

// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}

// Ensure the deployment completes while marking its pods as ready simultaneously
if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
t.Fatal(err)
}

// Get replicaset of the deployment
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
if err != nil {
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
}
if rs == nil {
t.Fatalf("unable to find replicaset of deployment %q", deploymentName)
}

// When the only OwnerReference of the RS points to another type of API object such as statefulset
// with Controller=false, the deployment should add a second OwnerReference (ControllerRef) pointing to itself
// with Controller=true
var falseVar = false
ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: deploymentName, Controller: &falseVar}
testRSControllerRefPatch(t, tester, rs, &ownerReference, 2)

// When the only OwnerReference of the RS points to the deployment with Controller=false,
// the deployment should set Controller=true for the only OwnerReference
ownerReference = metav1.OwnerReference{UID: tester.deployment.UID, APIVersion: "extensions/v1beta1", Kind: "Deployment", Name: deploymentName, Controller: &falseVar}
testRSControllerRefPatch(t, tester, rs, &ownerReference, 1)
}

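// testScalingUsingScaleSubresource scales the deployment through its scale
// subresource and verifies that .spec.replicas follows.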
func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, replicas int32) {
ns := tester.deployment.Namespace
deploymentName := tester.deployment.Name
deploymentClient := tester.c.ExtensionsV1beta1().Deployments(ns)
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
}
kind := "Deployment"
scaleClient := tester.c.ExtensionsV1beta1().Scales(ns)
scale, err := scaleClient.Get(kind, deploymentName)
if err != nil {
t.Fatalf("Failed to obtain scale subresource for deployment %q: %v", deploymentName, err)
}
if scale.Spec.Replicas != *deployment.Spec.Replicas {
t.Fatalf("Scale subresource for deployment %q does not match .Spec.Replicas: expected %d, got %d", deploymentName, *deployment.Spec.Replicas, scale.Spec.Replicas)
}

if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
scale, err := scaleClient.Get(kind, deploymentName)
if err != nil {
return err
}
scale.Spec.Replicas = replicas
_, err = scaleClient.Update(kind, scale)
return err
}); err != nil {
t.Fatalf("Failed to set .Spec.Replicas of scale subresource for deployment %q: %v", deploymentName, err)
}

deployment, err = deploymentClient.Get(deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
}
if *deployment.Spec.Replicas != replicas {
t.Fatalf(".Spec.Replicas of deployment %q does not match its scale subresource: expected %d, got %d", deploymentName, replicas, *deployment.Spec.Replicas)
}
}

func TestDeploymentScaleSubresource(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-deployment-scale-subresource"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

deploymentName := "deployment"
replicas := int32(2)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}

// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}

// Ensure the deployment completes while marking its pods as ready simultaneously
if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
t.Fatal(err)
}

// Use the scale subresource to scale the deployment up to 3
testScalingUsingScaleSubresource(t, tester, 3)
// Use the scale subresource to scale the deployment down to 0
testScalingUsingScaleSubresource(t, tester, 0)
}

// This test verifies that the Deployment does orphan a ReplicaSet when the ReplicaSet's
// .Labels field is changed so that it no longer matches the Deployment's selector. It also partially
// verifies that the collision avoidance mechanism is triggered when a Deployment's new ReplicaSet
// is orphaned, even without a PodTemplateSpec change. Refer to the comment below for more info:
// https://github.com/kubernetes/kubernetes/pull/59212#discussion_r166465113
func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-replicaset-orphaning-and-adoption-when-labels-change"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

deploymentName := "deployment"
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}

// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)

// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}

// Ensure the deployment completes while marking its pods as ready simultaneously
if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
t.Fatal(err)
}

// Orphaning: the deployment should remove the OwnerReference from an RS when the RS's labels change to no longer match its labels

// Get replicaset of the deployment
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
if err != nil {
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
}
if rs == nil {
t.Fatalf("unable to find replicaset of deployment %q", deploymentName)
}

// Verify the controllerRef of the replicaset is not nil and points to the deployment
controllerRef := metav1.GetControllerOf(rs)
if controllerRef == nil {
t.Fatalf("controllerRef of replicaset %q is nil", rs.Name)
}
if controllerRef.UID != tester.deployment.UID {
t.Fatalf("controllerRef of replicaset %q has a different UID: Expected %v, got %v", rs.Name, tester.deployment.UID, controllerRef.UID)
}

// Change the replicaset's labels so they no longer match the deployment's labels
labelMap := map[string]string{"new-name": "new-test"}
rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
update.Labels = labelMap
})
if err != nil {
t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
}

// Wait for the controllerRef of the replicaset to become nil
rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns.Name)
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
rs, err = rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return metav1.GetControllerOf(rs) == nil, nil
}); err != nil {
t.Fatalf("failed to wait for controllerRef of replicaset %q to become nil: %v", rs.Name, err)
}

// Wait for the deployment to create a new replicaset
// This triggers collision avoidance because of the deterministic nature of the replicaset name:
// the new replicaset gets a name with a different hash to preserve name uniqueness
var newRS *v1beta1.ReplicaSet
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
if err != nil {
return false, fmt.Errorf("failed to get new replicaset of deployment %q after orphaning: %v", deploymentName, err)
}
return newRS != nil, nil
}); err != nil {
t.Fatalf("failed to wait for deployment %q to create a new replicaset after orphaning: %v", deploymentName, err)
}
if newRS.UID == rs.UID {
t.Fatalf("expected deployment %q to create a new replicaset different from the orphaned one, but it didn't", deploymentName)
}

// Adoption: the deployment should add a controllerRef to an RS when the RS's labels change to match its labels

// Change the old replicaset's labels to match the deployment's labels
rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
update.Labels = testLabels()
})
if err != nil {
t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
}

// Wait for the deployment to adopt the old replicaset
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
rs, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
controllerRef = metav1.GetControllerOf(rs)
return controllerRef != nil && controllerRef.UID == tester.deployment.UID, nil
}); err != nil {
t.Fatalf("failed waiting for replicaset adoption by deployment %q to complete: %v", deploymentName, err)
}
}

96
vendor/k8s.io/kubernetes/test/integration/deployment/util.go
generated
vendored
@ -210,7 +210,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str

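// markPodReady flips the pod's Ready condition to true and persists it via
// UpdateStatus.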
func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
addPodConditionReady(pod, metav1.Now())
_, err := c.Core().Pods(ns).UpdateStatus(pod)
_, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
return err
}

@ -368,6 +368,10 @@ func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.Up
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}

func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
}

// waitForDeploymentRollbackCleared waits until the deployment has either started rolling back or no longer needs to roll back.
func (d *deploymentTester) waitForDeploymentRollbackCleared() error {
return testutil.WaitForDeploymentRollbackCleared(d.c, d.deployment.Namespace, d.deployment.Name, pollInterval, pollTimeout)
@ -378,8 +382,8 @@ func (d *deploymentTester) checkDeploymentRevisionAndImage(revision, image strin
return testutil.CheckDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image)
}

func (d *deploymentTester) waitForDeploymentUpdatedReplicasLTE(minUpdatedReplicas int32) error {
return testutil.WaitForDeploymentUpdatedReplicasLTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplicas int32) error {
return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
}

func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
@ -416,3 +420,89 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
}

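// scaleDeployment sets .spec.replicas, waits for the rollout to complete, and
// verifies that the new ReplicaSet was scaled to match.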
func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
var err error
d.deployment, err = d.updateDeployment(func(update *v1beta1.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
return fmt.Errorf("failed updating deployment %q: %v", d.deployment.Name, err)
}

if err := d.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
return err
}

rs, err := d.expectNewReplicaSet()
if err != nil {
return err
}
if *rs.Spec.Replicas != newReplicas {
return fmt.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas)
}
return nil
}

// waitForReadyReplicas waits for the number of ready replicas to equal the number of replicas.
func (d *deploymentTester) waitForReadyReplicas() error {
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
}
return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil
}); err != nil {
return fmt.Errorf("failed to wait for .readyReplicas to equal .replicas: %v", err)
}
return nil
}

// markUpdatedPodsReadyWithoutComplete marks updated Deployment pods as ready without waiting for the deployment to complete.
func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
pods, err := d.listUpdatedPods()
if err != nil {
return false, err
}
for i := range pods {
pod := pods[i]
if podutil.IsPodReady(&pod) {
continue
}
if err = markPodReady(d.c, d.deployment.Namespace, &pod); err != nil {
d.t.Logf("failed to update Deployment pod %q, will retry later: %v", pod.Name, err)
return false, nil
}
}
return true, nil
}); err != nil {
return fmt.Errorf("failed to mark all updated pods as ready: %v", err)
}
return nil
}

// checkDeploymentStatusReplicasFields verifies that all replicas fields of DeploymentStatus have the desired counts,
// returning an error as soon as a non-matching field is found.
func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
}
if deployment.Status.Replicas != replicas {
return fmt.Errorf("unexpected .replicas: expect %d, got %d", replicas, deployment.Status.Replicas)
}
if deployment.Status.UpdatedReplicas != updatedReplicas {
return fmt.Errorf("unexpected .updatedReplicas: expect %d, got %d", updatedReplicas, deployment.Status.UpdatedReplicas)
}
if deployment.Status.ReadyReplicas != readyReplicas {
return fmt.Errorf("unexpected .readyReplicas: expect %d, got %d", readyReplicas, deployment.Status.ReadyReplicas)
}
if deployment.Status.AvailableReplicas != availableReplicas {
return fmt.Errorf("unexpected .availableReplicas: expect %d, got %d", availableReplicas, deployment.Status.AvailableReplicas)
}
if deployment.Status.UnavailableReplicas != unavailableReplicas {
return fmt.Errorf("unexpected .unavailableReplicas: expect %d, got %d", unavailableReplicas, deployment.Status.UnavailableReplicas)
}
return nil
}

4
vendor/k8s.io/kubernetes/test/integration/etcd/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"etcd_storage_path_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/etcd",
tags = [
"etcd",
"integration",
@ -24,9 +23,9 @@ go_test(
"//pkg/apis/core:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/master:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
@ -37,6 +36,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
143
vendor/k8s.io/kubernetes/test/integration/etcd/etcd_storage_path_test.go
generated
vendored
@ -41,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/pkg/storage/storagebackend"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
@ -52,13 +53,13 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
kapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"

// install all APIs
_ "k8s.io/kubernetes/pkg/master" // TODO what else is needed

"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/transport"
)

// Etcd data for all persisted objects.
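// In the entries below, expectedGVK is the group/version/kind the object is
// serialized as in etcd when that differs from the GVR used to create it; the
// changes in this hunk reflect storage versions moving to apps/v1.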
@ -135,15 +136,17 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
gvr("apps", "v1beta1", "statefulsets"): {
stub: `{"metadata": {"name": "ss1"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
expectedEtcdPath: "/registry/statefulsets/etcdstoragepathtestnamespace/ss1",
expectedGVK: gvkP("apps", "v1", "StatefulSet"),
},
gvr("apps", "v1beta1", "deployments"): {
stub: `{"metadata": {"name": "deployment2"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
expectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment2",
expectedGVK: gvkP("extensions", "v1beta1", "Deployment"),
expectedGVK: gvkP("apps", "v1", "Deployment"),
},
gvr("apps", "v1beta1", "controllerrevisions"): {
stub: `{"metadata":{"name":"crs1"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
expectedEtcdPath: "/registry/controllerrevisions/etcdstoragepathtestnamespace/crs1",
expectedGVK: gvkP("apps", "v1", "ControllerRevision"),
},
// --

@ -151,27 +154,27 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
gvr("apps", "v1beta2", "statefulsets"): {
stub: `{"metadata": {"name": "ss2"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
expectedEtcdPath: "/registry/statefulsets/etcdstoragepathtestnamespace/ss2",
expectedGVK: gvkP("apps", "v1beta1", "StatefulSet"),
expectedGVK: gvkP("apps", "v1", "StatefulSet"),
},
gvr("apps", "v1beta2", "deployments"): {
stub: `{"metadata": {"name": "deployment3"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
expectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment3",
expectedGVK: gvkP("extensions", "v1beta1", "Deployment"),
expectedGVK: gvkP("apps", "v1", "Deployment"),
},
gvr("apps", "v1beta2", "daemonsets"): {
stub: `{"metadata": {"name": "ds5"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
expectedEtcdPath: "/registry/daemonsets/etcdstoragepathtestnamespace/ds5",
expectedGVK: gvkP("extensions", "v1beta1", "DaemonSet"),
expectedGVK: gvkP("apps", "v1", "DaemonSet"),
},
gvr("apps", "v1beta2", "replicasets"): {
stub: `{"metadata": {"name": "rs2"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
expectedEtcdPath: "/registry/replicasets/etcdstoragepathtestnamespace/rs2",
expectedGVK: gvkP("extensions", "v1beta1", "ReplicaSet"),
expectedGVK: gvkP("apps", "v1", "ReplicaSet"),
},
gvr("apps", "v1beta2", "controllerrevisions"): {
stub: `{"metadata":{"name":"crs2"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
expectedEtcdPath: "/registry/controllerrevisions/etcdstoragepathtestnamespace/crs2",
expectedGVK: gvkP("apps", "v1beta1", "ControllerRevision"),
expectedGVK: gvkP("apps", "v1", "ControllerRevision"),
},
// --

@ -179,27 +182,22 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
gvr("apps", "v1", "daemonsets"): {
stub: `{"metadata": {"name": "ds6"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
expectedEtcdPath: "/registry/daemonsets/etcdstoragepathtestnamespace/ds6",
expectedGVK: gvkP("extensions", "v1beta1", "DaemonSet"),
},
gvr("apps", "v1", "deployments"): {
stub: `{"metadata": {"name": "deployment4"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
expectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment4",
expectedGVK: gvkP("extensions", "v1beta1", "Deployment"),
},
gvr("apps", "v1", "statefulsets"): {
stub: `{"metadata": {"name": "ss3"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
expectedEtcdPath: "/registry/statefulsets/etcdstoragepathtestnamespace/ss3",
expectedGVK: gvkP("apps", "v1beta1", "StatefulSet"),
},
gvr("apps", "v1", "replicasets"): {
stub: `{"metadata": {"name": "rs3"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
expectedEtcdPath: "/registry/replicasets/etcdstoragepathtestnamespace/rs3",
expectedGVK: gvkP("extensions", "v1beta1", "ReplicaSet"),
},
gvr("apps", "v1", "controllerrevisions"): {
stub: `{"metadata":{"name":"crs3"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
expectedEtcdPath: "/registry/controllerrevisions/etcdstoragepathtestnamespace/crs3",
expectedGVK: gvkP("apps", "v1beta1", "ControllerRevision"),
},
// --

@ -249,7 +247,7 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {

// k8s.io/kubernetes/pkg/apis/events/v1beta1
gvr("events.k8s.io", "v1beta1", "events"): {
stub: `{"metadata": {"name": "event2"}, "regarding": {"namespace": "etcdstoragepathtestnamespace"}, "note": "some data here", "eventTime": "2017-08-09T15:04:05.000000Z", "reportingInstance": "node-xyz", "reportingController": "k8s.io/my-controller", "action": "DidNothing", "reason": "Lazyness"}`,
stub: `{"metadata": {"name": "event2"}, "regarding": {"namespace": "etcdstoragepathtestnamespace"}, "note": "some data here", "eventTime": "2017-08-09T15:04:05.000000Z", "reportingInstance": "node-xyz", "reportingController": "k8s.io/my-controller", "action": "DidNothing", "reason": "Laziness"}`,
expectedEtcdPath: "/registry/events/etcdstoragepathtestnamespace/event2",
expectedGVK: gvkP("", "v1", "Event"),
},
@ -259,6 +257,7 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
gvr("extensions", "v1beta1", "daemonsets"): {
stub: `{"metadata": {"name": "ds1"}, "spec": {"selector": {"matchLabels": {"u": "t"}}, "template": {"metadata": {"labels": {"u": "t"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container5"}]}}}}`,
expectedEtcdPath: "/registry/daemonsets/etcdstoragepathtestnamespace/ds1",
expectedGVK: gvkP("apps", "v1", "DaemonSet"),
},
gvr("extensions", "v1beta1", "podsecuritypolicies"): {
stub: `{"metadata": {"name": "psp1"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
@ -276,10 +275,12 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
gvr("extensions", "v1beta1", "deployments"): {
stub: `{"metadata": {"name": "deployment1"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
expectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment1",
expectedGVK: gvkP("apps", "v1", "Deployment"),
},
gvr("extensions", "v1beta1", "replicasets"): {
stub: `{"metadata": {"name": "rs1"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
expectedEtcdPath: "/registry/replicasets/etcdstoragepathtestnamespace/rs1",
expectedGVK: gvkP("apps", "v1", "ReplicaSet"),
},
// --

@ -295,12 +296,25 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
stub: `{"metadata": {"name": "pdb1"}, "spec": {"selector": {"matchLabels": {"anokkey": "anokvalue"}}}}`,
expectedEtcdPath: "/registry/poddisruptionbudgets/etcdstoragepathtestnamespace/pdb1",
},
gvr("policy", "v1beta1", "podsecuritypolicies"): {
stub: `{"metadata": {"name": "psp2"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
expectedEtcdPath: "/registry/podsecuritypolicy/psp2",
expectedGVK: gvkP("extensions", "v1beta1", "PodSecurityPolicy"),
},
// --

// k8s.io/kubernetes/pkg/apis/storage/v1alpha1
gvr("storage.k8s.io", "v1alpha1", "volumeattachments"): {
stub: `{"metadata": {"name": "va1"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv1"}}}`,
expectedEtcdPath: "/registry/volumeattachments/va1",
expectedGVK: gvkP("storage.k8s.io", "v1beta1", "VolumeAttachment"),
},
// --

// k8s.io/kubernetes/pkg/apis/storage/v1beta1
gvr("storage.k8s.io", "v1beta1", "volumeattachments"): {
stub: `{"metadata": {"name": "va2"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv2"}}}`,
expectedEtcdPath: "/registry/volumeattachments/va2",
},
// --

@ -433,7 +447,8 @@ var ephemeralWhiteList = createEphemeralWhiteList(
// --

// k8s.io/kubernetes/pkg/apis/authentication/v1
|
||||
gvr("authentication.k8s.io", "v1", "tokenreviews"), // not stored in etcd
|
||||
gvr("authentication.k8s.io", "v1", "tokenreviews"), // not stored in etcd
|
||||
gvr("authentication.k8s.io", "v1", "tokenrequests"), // not stored in etcd
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/authorization/v1beta1
|
||||
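Editor's note: the table entries above are keyed with gvr and checked against gvkP. A minimal sketch of what those helpers presumably look like, assuming they are thin constructors over the apimachinery schema types (the exact shapes are an assumption, not quoted from this diff):

package main

import "k8s.io/apimachinery/pkg/runtime/schema"

// Assumed shape: gvr builds the map key for a storage-path entry.
func gvr(g, v, r string) schema.GroupVersionResource {
	return schema.GroupVersionResource{Group: g, Version: v, Resource: r}
}

// Assumed shape: gvkP builds the expected-kind pointer used when a resource
// is stored under a different group/version than it was written with.
func gvkP(g, v, k string) *schema.GroupVersionKind {
	return &schema.GroupVersionKind{Group: g, Version: v, Kind: k}
}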
@ -707,47 +722,41 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
}
}()

for {
kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf?
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
kubeAPIServerOptions.Authorization.Mode = "RBAC"
listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}

// always get a fresh port in case something claimed the old one
kubePort, err := framework.FindFreeLocalPort()
if err != nil {
t.Fatal(err)
}
kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.Listener = listener
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf?
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
kubeAPIServerOptions.Authorization.Mode = "RBAC"
kubeAPIServerOptions.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}

kubeAPIServerOptions.SecureServing.BindPort = kubePort
tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}

tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig.ExtraConfig.APIResourceConfigSource = &allResourceSource{} // force enable all resources

kubeAPIServerConfig.ExtraConfig.APIResourceConfigSource = &allResourceSource{} // force enable all resources
kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
if err != nil {
t.Fatal(err)
}

kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
if err != nil {
t.Fatal(err)
}
kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
storageConfigValue.Store(kubeAPIServerOptions.Etcd.StorageConfig)

kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
storageConfigValue.Store(kubeAPIServerOptions.Etcd.StorageConfig)

if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Log(err)
}

time.Sleep(time.Second)
if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Fatal(err)
}
}()

@ -793,7 +802,7 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
t.Fatal(err)
}

kvClient, err := getEtcdKVClient(storageConfig)
kvClient, err := integration.GetEtcdKVClient(storageConfig)
if err != nil {
t.Fatal(err)
}
@ -934,7 +943,7 @@ func (c *allClient) verb(verb string, gvk schema.GroupVersionKind) (*restclient.
if err != nil {
return nil, err
}
return restclient.NewRequest(c.client, verb, baseURL, versionedAPIPath, contentConfig, *serializers, c.backoff, c.config.RateLimiter), nil
return restclient.NewRequest(c.client, verb, baseURL, versionedAPIPath, contentConfig, *serializers, c.backoff, c.config.RateLimiter, 0), nil
}

func (c *allClient) create(stub, ns string, mapping *meta.RESTMapping, all *[]cleanupData) error {
@ -1112,35 +1121,7 @@ func diffMapKeys(a, b interface{}, stringer func(interface{}) string) []string {
return ret
}

func getEtcdKVClient(config storagebackend.Config) (clientv3.KV, error) {
tlsInfo := transport.TLSInfo{
CertFile: config.CertFile,
KeyFile: config.KeyFile,
CAFile: config.CAFile,
}

tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}

cfg := clientv3.Config{
Endpoints: config.ServerList,
TLS: tlsConfig,
}

c, err := clientv3.New(cfg)
if err != nil {
return nil, err
}

return clientv3.NewKV(c), nil
}

type allResourceSource struct{}

func (*allResourceSource) AnyVersionOfResourceEnabled(resource schema.GroupResource) bool { return true }
func (*allResourceSource) AllResourcesForVersionEnabled(version schema.GroupVersion) bool { return true }
func (*allResourceSource) AnyResourcesForGroupEnabled(group string) bool { return true }
func (*allResourceSource) ResourceEnabled(resource schema.GroupVersionResource) bool { return true }
func (*allResourceSource) AnyResourcesForVersionEnabled(version schema.GroupVersion) bool { return true }
func (*allResourceSource) AnyVersionForGroupEnabled(group string) bool { return true }
func (*allResourceSource) VersionEnabled(version schema.GroupVersion) bool { return true }
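Editor's note: the refactor above replaces the find-a-free-port-then-bind retry loop with creating a listener on port 0 up front and handing that listener to the server, which closes the window where another process could claim the port. A standalone sketch of the pattern using only the Go standard library (names and output are illustrative):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Bind first: the kernel picks a free port and the listener keeps holding it,
	// so nothing can claim the port between discovery and use.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	port := listener.Addr().(*net.TCPAddr).Port
	fmt.Println("serving on port", port) // hand the listener, not the port number, to the server
}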
1
vendor/k8s.io/kubernetes/test/integration/evictions/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"evictions_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/evictions",
tags = ["integration"],
deps = [
"//pkg/controller/disruption:go_default_library",
2
vendor/k8s.io/kubernetes/test/integration/examples/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"apiserver_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/examples",
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app:go_default_library",
@ -22,6 +21,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
206
vendor/k8s.io/kubernetes/test/integration/examples/apiserver_test.go
generated
vendored
@ -22,9 +22,9 @@ import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path"
"strconv"
"sync/atomic"
"testing"
"time"
@ -34,6 +34,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
@ -90,52 +91,48 @@ func TestAggregatedAPIServer(t *testing.T) {

kubeClientConfigValue := atomic.Value{}
go func() {
for {
// always get a fresh port in case something claimed the old one
kubePort, err := framework.FindFreeLocalPort()
if err != nil {
t.Fatal(err)
}
listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}

kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
kubeAPIServerOptions.SecureServing.BindPort = kubePort
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.InsecureServing.BindPort = 0
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"}
kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"}
kubeAPIServerOptions.Authentication.RequestHeader.ExtraHeaderPrefixes = []string{"X-Remote-Extra-"}
kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"}
kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name()
kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name()
kubeAPIServerOptions.Authorization.Mode = "RBAC"
kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.Listener = listener
kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.InsecureServing.BindPort = 0
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"}
kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"}
kubeAPIServerOptions.Authentication.RequestHeader.ExtraHeaderPrefixes = []string{"X-Remote-Extra-"}
kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"}
kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name()
kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name()
kubeAPIServerOptions.Authorization.Mode = "RBAC"

tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
// Adjust the loopback config for external use (external server name and CA)
kubeAPIServerClientConfig := rest.CopyConfig(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
kubeAPIServerClientConfig.CAFile = path.Join(certDir, "apiserver.crt")
kubeAPIServerClientConfig.CAData = nil
kubeAPIServerClientConfig.ServerName = ""
kubeClientConfigValue.Store(kubeAPIServerClientConfig)
tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
// Adjust the loopback config for external use (external server name and CA)
kubeAPIServerClientConfig := rest.CopyConfig(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
kubeAPIServerClientConfig.CAFile = path.Join(certDir, "apiserver.crt")
kubeAPIServerClientConfig.CAData = nil
kubeAPIServerClientConfig.ServerName = ""
kubeClientConfigValue.Store(kubeAPIServerClientConfig)

kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
if err != nil {
t.Fatal(err)
}
kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
if err != nil {
t.Fatal(err)
}

if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Log(err)
}
time.Sleep(100 * time.Millisecond)
if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Fatal(err)
}
}()

@ -154,9 +151,13 @@ func TestAggregatedAPIServer(t *testing.T) {
t.Log(err)
return false, nil
}
if _, err := kubeClient.Discovery().ServerVersion(); err != nil {

healthStatus := 0
kubeClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
return false, nil
}

return true, nil
})
if err != nil {
@ -177,32 +178,30 @@ func TestAggregatedAPIServer(t *testing.T) {

// start the wardle server to prove we can aggregate it
go func() {
for {
// always get a fresh port in case something claimed the old one
wardlePortInt, err := framework.FindFreeLocalPort()
if err != nil {
t.Fatal(err)
}
atomic.StoreInt32(wardlePort, int32(wardlePortInt))
wardleCmd := sampleserver.NewCommandStartWardleServer(os.Stdout, os.Stderr, stopCh)
wardleCmd.SetArgs([]string{
"--bind-address", "127.0.0.1",
"--secure-port", strconv.Itoa(wardlePortInt),
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + proxyCACertFile.Name(),
"--requestheader-allowed-names=kube-aggregator",
"--authentication-kubeconfig", kubeconfigFile.Name(),
"--authorization-kubeconfig", kubeconfigFile.Name(),
"--etcd-servers", framework.GetEtcdURL(),
"--cert-dir", wardleCertDir,
"--kubeconfig", kubeconfigFile.Name(),
})
if err := wardleCmd.Execute(); err != nil {
t.Log(err)
}
time.Sleep(100 * time.Millisecond)
listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
atomic.StoreInt32(wardlePort, int32(port))

o := sampleserver.NewWardleServerOptions(os.Stdout, os.Stderr)
o.RecommendedOptions.SecureServing.Listener = listener
o.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
wardleCmd := sampleserver.NewCommandStartWardleServer(o, stopCh)
wardleCmd.SetArgs([]string{
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file=" + proxyCACertFile.Name(),
"--requestheader-allowed-names=kube-aggregator",
"--authentication-kubeconfig", kubeconfigFile.Name(),
"--authorization-kubeconfig", kubeconfigFile.Name(),
"--etcd-servers", framework.GetEtcdURL(),
"--cert-dir", wardleCertDir,
"--kubeconfig", kubeconfigFile.Name(),
})
if err := wardleCmd.Execute(); err != nil {
t.Fatal(err)
}
}()

@ -220,8 +219,9 @@ func TestAggregatedAPIServer(t *testing.T) {
t.Log(err)
return false, nil
}
if _, err := wardleClient.Discovery().ServerVersion(); err != nil {
t.Log(err)
healthStatus := 0
wardleClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
return false, nil
}
return true, nil
@ -255,30 +255,29 @@ func TestAggregatedAPIServer(t *testing.T) {
aggregatorPort := new(int32)

go func() {
for {
// always get a fresh port in case something claimed the old one
aggregatorPortInt, err := framework.FindFreeLocalPort()
if err != nil {
t.Fatal(err)
}
atomic.StoreInt32(aggregatorPort, int32(aggregatorPortInt))
aggregatorCmd := kubeaggregatorserver.NewCommandStartAggregator(os.Stdout, os.Stderr, stopCh)
aggregatorCmd.SetArgs([]string{
"--bind-address", "127.0.0.1",
"--secure-port", strconv.Itoa(aggregatorPortInt),
"--requestheader-username-headers", "",
"--proxy-client-cert-file", proxyClientCertFile.Name(),
"--proxy-client-key-file", proxyClientKeyFile.Name(),
"--kubeconfig", kubeconfigFile.Name(),
"--authentication-kubeconfig", kubeconfigFile.Name(),
"--authorization-kubeconfig", kubeconfigFile.Name(),
"--etcd-servers", framework.GetEtcdURL(),
"--cert-dir", aggregatorCertDir,
})
if err := aggregatorCmd.Execute(); err != nil {
t.Log(err)
}
time.Sleep(100 * time.Millisecond)
listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
atomic.StoreInt32(aggregatorPort, int32(port))

o := kubeaggregatorserver.NewDefaultOptions(os.Stdout, os.Stderr)
o.RecommendedOptions.SecureServing.Listener = listener
o.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
aggregatorCmd := kubeaggregatorserver.NewCommandStartAggregator(o, stopCh)
aggregatorCmd.SetArgs([]string{
"--requestheader-username-headers", "",
"--proxy-client-cert-file", proxyClientCertFile.Name(),
"--proxy-client-key-file", proxyClientKeyFile.Name(),
"--kubeconfig", kubeconfigFile.Name(),
"--authentication-kubeconfig", kubeconfigFile.Name(),
"--authorization-kubeconfig", kubeconfigFile.Name(),
"--etcd-servers", framework.GetEtcdURL(),
"--cert-dir", aggregatorCertDir,
})

if err := aggregatorCmd.Execute(); err != nil {
t.Fatal(err)
}
}()

@ -295,7 +294,9 @@ func TestAggregatedAPIServer(t *testing.T) {
// this happens if we race the API server for writing the cert
return false, nil
}
if _, err := aggregatorDiscoveryClient.Discovery().ServerVersion(); err != nil {
healthStatus := 0
aggregatorDiscoveryClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
return false, nil
}
return true, nil
@ -304,7 +305,7 @@ func TestAggregatedAPIServer(t *testing.T) {
t.Fatal(err)
}

// now we're finally ready to test. These are what's run by defautl now
// now we're finally ready to test. These are what's run by default now
testAPIGroupList(t, wardleClient.Discovery().RESTClient())
testAPIGroup(t, wardleClient.Discovery().RESTClient())
testAPIResourceList(t, wardleClient.Discovery().RESTClient())
@ -342,8 +343,8 @@ func TestAggregatedAPIServer(t *testing.T) {
_, err = aggregatorClient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "v1."},
Spec: apiregistrationv1beta1.APIServiceSpec{
// register this as a loca service so it doesn't try to lookup the default kubernetes service
// which will have an unroutable IP address since its fake.
// register this as a local service so it doesn't try to lookup the default kubernetes service
// which will have an unroutable IP address since it's fake.
Group: "",
Version: "v1",
GroupPriorityMinimum: 100,
@ -460,8 +461,3 @@ func testAPIResourceList(t *testing.T, client rest.Interface) {
assert.Equal(t, "flunders", apiResourceList.APIResources[1].Name)
assert.True(t, apiResourceList.APIResources[1].Namespaced)
}

const (
policyCachePollInterval = 100 * time.Millisecond
policyCachePollTimeout = 5 * time.Second
)
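Editor's note: the diff above gates test readiness on GET /healthz returning 200 instead of a successful discovery call. A sketch of the same readiness poll against a plain HTTP endpoint, standard library only (the URL and timings are illustrative, not taken from the test):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitHealthy polls url until it answers 200 OK or the timeout elapses.
func waitHealthy(url string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("%s not healthy after %v", url, timeout)
}

func main() {
	fmt.Println(waitHealthy("http://127.0.0.1:8080/healthz", 30*time.Second))
}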
6
vendor/k8s.io/kubernetes/test/integration/framework/BUILD
generated
vendored
@ -22,13 +22,8 @@ go_library(
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/policy/v1beta1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/generated/openapi:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master:go_default_library",
"//pkg/util/env:go_default_library",
@ -66,7 +61,6 @@ go_library(
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

158
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go
generated
vendored
@ -21,9 +21,6 @@ import (
"net/http"
"net/http/httptest"
"path"
goruntime "runtime"
"strconv"
"sync"
"time"

"github.com/go-openapi/spec"
@ -45,7 +42,6 @@ import (
authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
authauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
authorizerunion "k8s.io/apiserver/pkg/authorization/union"
genericapiserver "k8s.io/apiserver/pkg/server"
@ -53,16 +49,12 @@ import (
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/informers"
extinformers "k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/batch"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/controller"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/generated/openapi"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master"
@ -70,37 +62,6 @@ import (
"k8s.io/kubernetes/plugin/pkg/admission/admit"
)

const (
// Timeout used in benchmarks, to eg: scale an rc
DefaultTimeout = 30 * time.Minute

// Rc manifest used to create pods for benchmarks.
// TODO: Convert this to a full path?
TestRCManifest = "benchmark-controller.json"
)

// MasterComponents is a control struct for all master components started via NewMasterComponents.
// TODO: Include all master components (scheduler, nodecontroller).
// TODO: Reconcile with integration.go, currently the master used there doesn't understand
// how to restart cleanly, which is required for each iteration of a benchmark. The integration
// tests also don't make it easy to isolate and turn off components at will.
type MasterComponents struct {
// Raw http server in front of the master
ApiServer *httptest.Server
// Kubernetes master, contains an embedded etcd storage
KubeMaster *master.Master
// Restclient used to talk to the kubernetes master
ClientSet clientset.Interface
// Replication controller manager
ControllerManager *replicationcontroller.ReplicationManager
// CloseFn shuts down the server
CloseFn CloseFunc
// Channel for stop signals to rc manager
rcStopCh chan struct{}
// Used to stop master components individually, and via MasterComponents.Stop
once sync.Once
}

// Config is a struct of configuration directives for NewMasterComponents.
type Config struct {
// If nil, a default is used, partially filled configs will not get populated.
@ -113,37 +74,10 @@ type Config struct {
// TODO: Add configs for endpoints controller, scheduler etc
}

// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
m, s, closeFn := startMasterOrDie(c.MasterConfig, nil, nil)
// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
glog.Infof("Master %+v", s.URL)
// TODO: caesarxuchao: remove this client when the refactoring of client libraray is done.
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
rcStopCh := make(chan struct{})
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst)

// TODO: Support events once we can cleanly shutdown an event recorder.
controllerManager.SetEventRecorder(&record.FakeRecorder{})
if c.StartReplicationManager {
informerFactory.Start(rcStopCh)
go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
}
return &MasterComponents{
ApiServer: s,
KubeMaster: m,
ClientSet: clientset,
ControllerManager: controllerManager,
CloseFn: closeFn,
rcStopCh: rcStopCh,
}
}

// alwaysAllow always allows an action
type alwaysAllow struct{}

func (alwaysAllow) Authorize(requestAttributes authauthorizer.Attributes) (authorizer.Decision, string, error) {
func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {
return authorizer.DecisionAllow, "always allow", nil
}

@ -225,17 +159,17 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
}

tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)
if masterConfig.GenericConfig.Authenticator == nil {
masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
if masterConfig.GenericConfig.Authentication.Authenticator == nil {
masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
} else {
masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authenticator)
masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator)
}

if masterConfig.GenericConfig.Authorizer != nil {
if masterConfig.GenericConfig.Authorization.Authorizer != nil {
tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
masterConfig.GenericConfig.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorizer)
masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer)
} else {
masterConfig.GenericConfig.Authorizer = alwaysAllow{}
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{}
}

masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken
@ -245,7 +179,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
glog.Fatal(err)
}

sharedInformers := extinformers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
if err != nil {
closeFn()
@ -300,7 +234,10 @@ func NewMasterConfig() *master.Config {
// FIXME (soltysh): this GroupVersionResource override should be configurable
// we need to set both for the whole group and for cronjobs, separately
resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: "batch", Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
// we also need to set both for the storage group and for volumeattachments, separately
resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})

storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)
storageFactory.SetSerializer(
@ -343,7 +280,7 @@ func NewMasterConfig() *master.Config {
genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
genericConfig.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
genericConfig.AdmissionControl = admit.NewAlwaysAdmit()
genericConfig.EnableMetrics = true

@ -374,22 +311,6 @@ func NewIntegrationTestMasterConfig() *master.Config {
return masterConfig
}

func (m *MasterComponents) stopRCManager() {
close(m.rcStopCh)
}

func (m *MasterComponents) Stop(apiServer, rcManager bool) {
glog.Infof("Stopping master components")
if rcManager {
// Ordering matters because the apiServer will only shutdown when pending
// requests are done
m.once.Do(m.stopRCManager)
}
if apiServer {
m.CloseFn()
}
}

// CloseFunc can be called to cleanup the master
type CloseFunc func()

@ -406,59 +327,6 @@ func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, mast
return startMasterOrDie(masterConfig, s, masterReceiver)
}

// Task is a function passed to worker goroutines by RunParallel.
// The function needs to implement its own thread safety.
type Task func(id int) error

// RunParallel spawns a goroutine per task in the given queue
func RunParallel(task Task, numTasks, numWorkers int) {
start := time.Now()
if numWorkers <= 0 {
numWorkers = numTasks
}
defer func() {
glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
}()
var wg sync.WaitGroup
semCh := make(chan struct{}, numWorkers)
wg.Add(numTasks)
for id := 0; id < numTasks; id++ {
go func(id int) {
semCh <- struct{}{}
err := task(id)
if err != nil {
glog.Fatalf("Worker failed with %v", err)
}
<-semCh
wg.Done()
}(id)
}
wg.Wait()
close(semCh)
}

// FindFreeLocalPort returns the number of an available port number on
// the loopback interface. Useful for determining the port to launch
// a server on. Error handling required - there is a non-zero chance
// that the returned port number will be bound by another process
// after this function returns.
func FindFreeLocalPort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
return 0, err
}
defer l.Close()
_, portStr, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return 0, err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return 0, err
}
return port, nil
}

// SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.
func SharedEtcd() *storagebackend.Config {
cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil)
9
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go
generated
vendored
@ -73,7 +73,14 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.CoreV1().Nodes().Create(baseNode); err != nil {
var err error
for retry := 0; retry < retries; retry++ {
_, err = p.client.CoreV1().Nodes().Create(baseNode)
if err == nil || !e2eframework.IsRetryableAPIError(err) {
break
}
}
if err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
62
vendor/k8s.io/kubernetes/test/integration/framework/util.go
generated
vendored
@ -19,29 +19,20 @@ limitations under the License.
package framework

import (
"io/ioutil"
"net/http/httptest"
"strings"
"testing"
"time"

"github.com/golang/glog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl"
)

const (
// When these values are updated, also update cmd/kubelet/app/options/options.go
// A copy of these values exist in e2e/framework/util.go.
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
// When these values are updated, also update cmd/kubelet/app/options/container_runtime.go
// A copy of these values exist in test/utils/image/manifest.go
currentPodInfraContainerImageName = "k8s.gcr.io/pause"
currentPodInfraContainerImageVersion = "3.1"
)

// GetServerArchitecture fetches the architecture of the cluster's apiserver.
@ -80,48 +71,3 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test
func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
}

// RCFromManifest reads a .json file and returns the rc in it.
func RCFromManifest(fileName string) *v1.ReplicationController {
data, err := ioutil.ReadFile(fileName)
if err != nil {
glog.Fatalf("Unexpected error reading rc manifest %v", err)
}
var controller v1.ReplicationController
if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
glog.Fatalf("Unexpected error reading rc manifest %v", err)
}
return &controller
}

// StopRC stops the rc via kubectl's stop library
func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
if err != nil || reaper == nil {
return err
}
err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
if err != nil {
return err
}
return nil
}

// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) {
scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
if err != nil {
return nil, err
}
retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
if err != nil {
return nil, err
}
scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return scaled, nil
}
9
vendor/k8s.io/kubernetes/test/integration/garbagecollector/BUILD
generated
vendored
@ -1,9 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_test")

go_test(
name = "go_default_test",
@ -13,7 +8,6 @@ go_test(
"garbage_collector_test.go",
"main_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/garbagecollector",
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
@ -52,4 +46,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
2
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go
generated
vendored
@ -851,7 +851,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
// between core and custom resources.
//
// TODO: Consider how this could be represented with table-style tests (e.g. a
// before/after expected object graph given a delete operation targetting a
// before/after expected object graph given a delete operation targeting a
// specific node in the before graph with certain delete options).
func TestMixedRelationships(t *testing.T) {
ctx := setup(t, 5)
36
vendor/k8s.io/kubernetes/test/integration/master/BUILD
generated
vendored
@ -2,6 +2,7 @@ package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)

@ -10,11 +11,13 @@ go_test(
size = "large",
srcs = [
"crd_test.go",
"kms_transformation_test.go",
"kube_apiserver_test.go",
"main_test.go",
"secrets_transformation_test.go",
"synthetic_master_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/master",
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
@ -29,6 +32,7 @@ go_test(
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@ -42,6 +46,10 @@ go_test(
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//vendor/k8s.io/apiserver/pkg/features:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
@ -49,6 +57,7 @@ go_test(
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
],
)

@ -64,3 +73,28 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_library(
name = "go_default_library",
srcs = [
"kms_plugin_mock.go",
"transformation_testcase.go",
],
importpath = "k8s.io/kubernetes/test/integration/master",
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
111
vendor/k8s.io/kubernetes/test/integration/master/kms_plugin_mock.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
"context"
"encoding/base64"
"fmt"
"net"
"os"

"golang.org/x/sys/unix"
"google.golang.org/grpc"

kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1"
)

const (
kmsAPIVersion = "v1beta1"
sockFile = "/tmp/kms-provider.sock"
unixProtocol = "unix"
)

// base64Plugin is a gRPC server for a mock KMS provider.
// Uses base64 to simulate encrypt and decrypt.
type base64Plugin struct {
grpcServer *grpc.Server
listener net.Listener

// Allow users of the plugin to sense requests that were passed to KMS.
encryptRequest chan *kmsapi.EncryptRequest
decryptRequest chan *kmsapi.DecryptRequest
}

func NewBase64Plugin() (*base64Plugin, error) {
if err := cleanSockFile(); err != nil {
return nil, err
}

listener, err := net.Listen(unixProtocol, sockFile)
if err != nil {
return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err)
}

server := grpc.NewServer()

result := &base64Plugin{
grpcServer: server,
listener: listener,
encryptRequest: make(chan *kmsapi.EncryptRequest, 1),
decryptRequest: make(chan *kmsapi.DecryptRequest, 1),
}

kmsapi.RegisterKeyManagementServiceServer(server, result)

return result, nil
}

func (s *base64Plugin) cleanUp() {
s.grpcServer.Stop()
s.listener.Close()
cleanSockFile()
}

var testProviderAPIVersion = kmsAPIVersion

func (s *base64Plugin) Version(ctx context.Context, request *kmsapi.VersionRequest) (*kmsapi.VersionResponse, error) {
return &kmsapi.VersionResponse{Version: testProviderAPIVersion, RuntimeName: "testKMS", RuntimeVersion: "0.0.1"}, nil
}

func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) {
s.decryptRequest <- request
buf := make([]byte, base64.StdEncoding.DecodedLen(len(request.Cipher)))
n, err := base64.StdEncoding.Decode(buf, request.Cipher)
if err != nil {
return nil, err
}

return &kmsapi.DecryptResponse{Plain: buf[:n]}, nil
}

func (s *base64Plugin) Encrypt(ctx context.Context, request *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) {
s.encryptRequest <- request

buf := make([]byte, base64.StdEncoding.EncodedLen(len(request.Plain)))
base64.StdEncoding.Encode(buf, request.Plain)

return &kmsapi.EncryptResponse{Cipher: buf}, nil
}

func cleanSockFile() error {
err := unix.Unlink(sockFile)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete the socket file, error: %v", err)
}
return nil
}
163
vendor/k8s.io/kubernetes/test/integration/master/kms_transformation_test.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
"bytes"
"context"
"crypto/aes"
"encoding/binary"
"fmt"
"strings"
"testing"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/storage/value"
aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"

kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1"
)

const (
kmsPrefix = "k8s:enc:kms:v1:grpc-kms-provider:"
dekKeySizeLen = 2

kmsConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- kms:
name: grpc-kms-provider
cachesize: 1000
endpoint: unix:///tmp/kms-provider.sock
`
)

// rawDEKKEKSecret provides operations for working with secrets transformed with the Data Encryption Key (DEK) / Key Encryption Key (KEK) envelope.
type rawDEKKEKSecret []byte

func (r rawDEKKEKSecret) getDEKLen() int {
// DEK's length is stored in the two bytes that follow the prefix.
return int(binary.BigEndian.Uint16(r[len(kmsPrefix) : len(kmsPrefix)+dekKeySizeLen]))
}

func (r rawDEKKEKSecret) getDEK() []byte {
return r[len(kmsPrefix)+dekKeySizeLen : len(kmsPrefix)+dekKeySizeLen+r.getDEKLen()]
}

func (r rawDEKKEKSecret) getStartOfPayload() int {
return len(kmsPrefix) + dekKeySizeLen + r.getDEKLen()
}

func (r rawDEKKEKSecret) getPayload() []byte {
return r[r.getStartOfPayload():]
}

// TestKMSProvider is an integration test between the Kube API, ETCD and the KMS Plugin.
// Concretely, this test verifies the following integration contracts:
// 1. Raw records in ETCD that were processed by KMS Provider should be prefixed with k8s:enc:kms:v1:grpc-kms-provider-name:
// 2. Data Encryption Key (DEK) should be generated by envelopeTransformer and passed to KMS gRPC Plugin
// 3. KMS gRPC Plugin should encrypt the DEK with a Key Encryption Key (KEK) and pass it back to envelopeTransformer
// 4. The payload (ex. Secret) should be encrypted via AES CBC transform
// 5. Prefix-EncryptedDEK-EncryptedPayload structure should be deposited to ETCD
func TestKMSProvider(t *testing.T) {
pluginMock, err := NewBase64Plugin()
if err != nil {
t.Fatalf("failed to create mock of KMS Plugin: %v", err)
}
defer pluginMock.cleanUp()
go pluginMock.grpcServer.Serve(pluginMock.listener)

test, err := newTransformTest(t, kmsConfigYAML)
if err != nil {
t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s", kmsConfigYAML)
}
defer test.cleanUp()

secretETCDPath := test.getETCDPath()
var rawSecretAsSeenByETCD rawDEKKEKSecret
rawSecretAsSeenByETCD, err = test.getRawSecretFromETCD()
if err != nil {
t.Fatalf("failed to read %s from etcd: %v", secretETCDPath, err)
}

if !bytes.HasPrefix(rawSecretAsSeenByETCD, []byte(kmsPrefix)) {
t.Fatalf("expected secret to be prefixed with %s, but got %s", kmsPrefix, rawSecretAsSeenByETCD)
}

// Since Data Encryption Key (DEK) is randomly generated (per encryption operation), we need to ask KMS Mock for it.
dekPlainAsSeenByKMS, err := getDEKFromKMSPlugin(pluginMock)
if err != nil {
t.Fatalf("failed to get DEK from KMS: %v", err)
}

decryptResponse, err := pluginMock.Decrypt(context.Background(),
&kmsapi.DecryptRequest{Version: kmsAPIVersion, Cipher: rawSecretAsSeenByETCD.getDEK()})
if err != nil {
t.Fatalf("failed to decrypt DEK, %v", err)
}
dekPlainAsWouldBeSeenByETCD := decryptResponse.Plain

if !bytes.Equal(dekPlainAsSeenByKMS, dekPlainAsWouldBeSeenByETCD) {
t.Fatalf("expected dekPlainAsSeenByKMS %v to be passed to KMS Plugin, but got %s",
dekPlainAsSeenByKMS, dekPlainAsWouldBeSeenByETCD)
}

plainSecret, err := decryptPayload(dekPlainAsWouldBeSeenByETCD, rawSecretAsSeenByETCD, secretETCDPath)
if err != nil {
t.Fatalf("failed to transform from storage via AESCBC, err: %v", err)
}

if !strings.Contains(string(plainSecret), secretVal) {
t.Fatalf("expected %q after decryption, but got %q", secretVal, string(plainSecret))
}

// Secrets should be un-enveloped on direct reads from Kube API Server.
s, err := test.restClient.CoreV1().Secrets(testNamespace).Get(testSecret, metav1.GetOptions{})
if secretVal != string(s.Data[secretKey]) {
t.Fatalf("expected %s from KubeAPI, but got %s", secretVal, string(s.Data[secretKey]))
}
}

func getDEKFromKMSPlugin(pluginMock *base64Plugin) ([]byte, error) {
select {
case e := <-pluginMock.encryptRequest:
return e.Plain, nil
case <-time.After(1 * time.Microsecond):
return nil, fmt.Errorf("timed-out while getting encryption request from KMS Plugin Mock")
}
}

func decryptPayload(key []byte, secret rawDEKKEKSecret, secretETCDPath string) ([]byte, error) {
block, err := aes.NewCipher(key)
if err != nil {
return nil, fmt.Errorf("failed to initialize AES Cipher: %v", err)
}
// etcd path of the key is used as the authenticated context - need to pass it to decrypt
ctx := value.DefaultContext([]byte(secretETCDPath))
aescbcTransformer := aestransformer.NewCBCTransformer(block)
plainSecret, _, err := aescbcTransformer.TransformFromStorage(secret.getPayload(), ctx)
if err != nil {
return nil, fmt.Errorf("failed to transform from storage via AESCBC, err: %v", err)
}

return plainSecret, nil
}
72
vendor/k8s.io/kubernetes/test/integration/master/kube_apiserver_test.go
generated
vendored
72
vendor/k8s.io/kubernetes/test/integration/master/kube_apiserver_test.go
generated
vendored
@ -17,21 +17,25 @@ limitations under the License.
package master

import (
	"encoding/json"
	"strings"
	"testing"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kube-aggregator/pkg/apis/apiregistration"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

func TestRun(t *testing.T) {
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
	defer result.TearDownFn()
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
	defer server.TearDownFn()

	client, err := kubernetes.NewForConfig(result.ClientConfig)
	client, err := kubernetes.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
@ -72,3 +76,65 @@ func TestRun(t *testing.T) {
		t.Fatalf("Failed to create deployment: %v", err)
	}
}

// TestOpenAPIDelegationChainPlumbing is a smoke test that checks for
// the existence of some representative paths from the
// apiextensions-server and the kube-aggregator server, both part of
// the delegation chain in kube-apiserver.
func TestOpenAPIDelegationChainPlumbing(t *testing.T) {
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd())
	defer server.TearDownFn()

	kubeclient, err := kubernetes.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	result := kubeclient.RESTClient().Get().AbsPath("/swagger.json").Do()
	status := 0
	result.StatusCode(&status)
	if status != 200 {
		t.Fatalf("GET /swagger.json failed: expected status=%d, got=%d", 200, status)
	}

	raw, err := result.Raw()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	type openAPISchema struct {
		Paths map[string]interface{} `json:"paths"`
	}

	var doc openAPISchema
	err = json.Unmarshal(raw, &doc)
	if err != nil {
		t.Fatalf("Failed to unmarshal: %v", err)
	}

	matchedExtension := false
	extensionsPrefix := "/apis/" + apiextensions.GroupName

	matchedRegistration := false
	registrationPrefix := "/apis/" + apiregistration.GroupName

	for path := range doc.Paths {
		if strings.HasPrefix(path, extensionsPrefix) {
			matchedExtension = true
		}
		if strings.HasPrefix(path, registrationPrefix) {
			matchedRegistration = true
		}
		if matchedExtension && matchedRegistration {
			return
		}
	}

	if !matchedExtension {
		t.Errorf("missing path: %q", extensionsPrefix)
	}

	if !matchedRegistration {
		t.Errorf("missing path: %q", registrationPrefix)
	}
}
177
vendor/k8s.io/kubernetes/test/integration/master/secrets_transformation_test.go
generated
vendored
Normal file
177
vendor/k8s.io/kubernetes/test/integration/master/secrets_transformation_test.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
	"crypto/aes"
	"crypto/cipher"
	"encoding/base64"
	"fmt"
	"testing"

	"k8s.io/apiserver/pkg/server/options/encryptionconfig"
	"k8s.io/apiserver/pkg/storage/value"
	aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
)

const (
	aesGCMPrefix = "k8s:enc:aesgcm:v1:key1:"
	aesCBCPrefix = "k8s:enc:aescbc:v1:key1:"

	aesGCMConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
    - secrets
    providers:
    - aesgcm:
        keys:
        - name: key1
          secret: c2VjcmV0IGlzIHNlY3VyZQ==
`

	aesCBCConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
    - secrets
    providers:
    - aescbc:
        keys:
        - name: key1
          secret: c2VjcmV0IGlzIHNlY3VyZQ==
`

	identityConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
    - secrets
    providers:
    - identity: {}
`
)
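These EncryptionConfig documents are plain YAML, and the harness later pulls the provider key back out of them with ghodss/yaml (see getEncryptionConfig in transformation_testcase.go below). A minimal, self-contained sketch of that parse — using hypothetical local structs that mirror only the fields these tests touch, not the real encryptionconfig API types:

package main

import (
	"fmt"

	"github.com/ghodss/yaml" // the same YAML package the test harness imports
)

// Hypothetical mirrors of the encryptionconfig types, trimmed to what the tests read.
type aesKey struct {
	Name   string `json:"name"`
	Secret string `json:"secret"`
}

type aesConfiguration struct {
	Keys []aesKey `json:"keys"`
}

type providerConfig struct {
	AESGCM *aesConfiguration `json:"aesgcm,omitempty"`
	AESCBC *aesConfiguration `json:"aescbc,omitempty"`
}

type resourceConfig struct {
	Resources []string         `json:"resources"`
	Providers []providerConfig `json:"providers"`
}

type encryptionConfig struct {
	Kind       string           `json:"kind"`
	APIVersion string           `json:"apiVersion"`
	Resources  []resourceConfig `json:"resources"`
}

func main() {
	const cfgYAML = `
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
    - secrets
    providers:
    - aesgcm:
        keys:
        - name: key1
          secret: c2VjcmV0IGlzIHNlY3VyZQ==
`
	var cfg encryptionConfig
	if err := yaml.Unmarshal([]byte(cfgYAML), &cfg); err != nil {
		panic(err)
	}
	// The tests reach the key the same way: Resources[0].Providers[0].AESGCM.Keys[0].Secret.
	fmt.Println(cfg.Resources[0].Providers[0].AESGCM.Keys[0].Secret)
}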
// TestSecretsShouldBeTransformed is an integration test between KubeAPI and etcd that checks:
// 1. Secrets are encrypted on write
// 2. Secrets are decrypted on read
// when EncryptionConfig is passed to KubeAPI server.
func TestSecretsShouldBeTransformed(t *testing.T) {
	var testCases = []struct {
		transformerConfigContent string
		transformerPrefix        string
		unSealFunc               unSealSecret
	}{
		{aesGCMConfigYAML, aesGCMPrefix, unSealWithGCMTransformer},
		{aesCBCConfigYAML, aesCBCPrefix, unSealWithCBCTransformer},
		// TODO: add secretbox
	}
	for _, tt := range testCases {
		test, err := newTransformTest(t, tt.transformerConfigContent)
		if err != nil {
			test.cleanUp()
			t.Errorf("failed to setup test for envelope %s, error was %v", tt.transformerPrefix, err)
			continue
		}
		test.run(tt.unSealFunc, tt.transformerPrefix)
		test.cleanUp()
	}
}

// Baseline (no enveloping) - use to contrast with enveloping benchmarks.
func BenchmarkBase(b *testing.B) {
	runBenchmark(b, "")
}

// Identity transformer is a NOOP (crypto-wise) - use to contrast with AESGCM and AESCBC benchmark results.
func BenchmarkIdentityWrite(b *testing.B) {
	runBenchmark(b, identityConfigYAML)
}

func BenchmarkAESGCMEnvelopeWrite(b *testing.B) {
	runBenchmark(b, aesGCMConfigYAML)
}

func BenchmarkAESCBCEnvelopeWrite(b *testing.B) {
	runBenchmark(b, aesCBCConfigYAML)
}

func runBenchmark(b *testing.B, transformerConfig string) {
	b.StopTimer()
	test, err := newTransformTest(b, transformerConfig)
	defer test.cleanUp()
	if err != nil {
		b.Fatalf("failed to setup benchmark for config %s, error was %v", transformerConfig, err)
	}

	b.StartTimer()
	test.benchmark(b)
	b.StopTimer()
}

func unSealWithGCMTransformer(cipherText []byte, ctx value.Context,
	transformerConfig encryptionconfig.ProviderConfig) ([]byte, error) {

	block, err := newAESCipher(transformerConfig.AESGCM.Keys[0].Secret)
	if err != nil {
		return nil, fmt.Errorf("failed to create block cipher: %v", err)
	}

	gcmTransformer := aestransformer.NewGCMTransformer(block)

	clearText, _, err := gcmTransformer.TransformFromStorage(cipherText, ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt secret: %v", err)
	}

	return clearText, nil
}
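Conceptually, the GCM transformer being unsealed above is authenticated encryption keyed by the config secret, with the record's etcd path bound in as authenticated data. A rough standard-library-only illustration of that idea follows; the real transformer's nonce handling and wire format differ, and the etcd path used here is illustrative:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// The same base64 key the test configs use ("secret is secure", 16 bytes -> AES-128).
	key, err := base64.StdEncoding.DecodeString("c2VjcmV0IGlzIHNlY3VyZQ==")
	if err != nil {
		panic(err)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	// The etcd key path plays the role of additional authenticated data,
	// tying the ciphertext to its storage location.
	aad := []byte("/registry/secrets/secret-encryption-test/test-secret")
	plaintext := []byte("api_key=086a7ffc-0225-11e8-ba89-0ed5f89f718b")

	sealed := gcm.Seal(nil, nonce, plaintext, aad)

	// Opening with the same nonce and AAD succeeds; a different path would fail authentication.
	opened, err := gcm.Open(nil, nonce, sealed, aad)
	if err != nil {
		panic(err)
	}
	fmt.Printf("roundtrip ok: %s\n", opened)
}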
func unSealWithCBCTransformer(cipherText []byte, ctx value.Context,
	transformerConfig encryptionconfig.ProviderConfig) ([]byte, error) {

	block, err := newAESCipher(transformerConfig.AESCBC.Keys[0].Secret)
	if err != nil {
		return nil, err
	}

	cbcTransformer := aestransformer.NewCBCTransformer(block)

	clearText, _, err := cbcTransformer.TransformFromStorage(cipherText, ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt secret: %v", err)
	}

	return clearText, nil
}

func newAESCipher(key string) (cipher.Block, error) {
	k, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		return nil, fmt.Errorf("failed to decode config secret: %v", err)
	}

	block, err := aes.NewCipher(k)
	if err != nil {
		return nil, fmt.Errorf("failed to create AES cipher: %v", err)
	}

	return block, nil
}
10
vendor/k8s.io/kubernetes/test/integration/master/synthetic_master_test.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/integration/master/synthetic_master_test.go
generated
vendored
@ -134,7 +134,7 @@ func TestEmptyList(t *testing.T) {

func initStatusForbiddenMasterCongfig() *master.Config {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer()
	masterConfig.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer()
	return masterConfig
}

@ -143,8 +143,8 @@ func initUnauthorizedMasterCongfig() *master.Config {
	tokenAuthenticator := tokentest.New()
	tokenAuthenticator.Tokens[AliceToken] = &user.DefaultInfo{Name: "alice", UID: "1"}
	tokenAuthenticator.Tokens[BobToken] = &user.DefaultInfo{Name: "bob", UID: "2"}
	masterConfig.GenericConfig.Authenticator = group.NewGroupAdder(bearertoken.New(tokenAuthenticator), []string{user.AllAuthenticated})
	masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{}
	masterConfig.GenericConfig.Authentication.Authenticator = group.NewGroupAdder(bearertoken.New(tokenAuthenticator), []string{user.AllAuthenticated})
	masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{}
	return masterConfig
}

@ -279,7 +279,7 @@ var deploymentExtensions string = `
	"spec": {
		"containers": [{
			"name": "nginx",
			"image": "gcr.io/google-containers/nginx:1.7.9"
			"image": "k8s.gcr.io/nginx:1.7.9"
		}]
	}
}
@ -306,7 +306,7 @@ var deploymentApps string = `
	"spec": {
		"containers": [{
			"name": "nginx",
			"image": "gcr.io/google-containers/nginx:1.7.9"
			"image": "k8s.gcr.io/nginx:1.7.9"
		}]
	}
}
239
vendor/k8s.io/kubernetes/test/integration/master/transformation_testcase.go
generated
vendored
Normal file
239
vendor/k8s.io/kubernetes/test/integration/master/transformation_testcase.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"testing"

	"github.com/coreos/etcd/clientv3"
	"github.com/ghodss/yaml"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/server/options/encryptionconfig"
	"k8s.io/apiserver/pkg/storage/storagebackend"
	"k8s.io/apiserver/pkg/storage/value"
	"k8s.io/client-go/kubernetes"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration"
	"k8s.io/kubernetes/test/integration/framework"
)

const (
	secretKey                = "api_key"
	secretVal                = "086a7ffc-0225-11e8-ba89-0ed5f89f718b"
	encryptionConfigFileName = "encryption.conf"
	testNamespace            = "secret-encryption-test"
	testSecret               = "test-secret"
)

type unSealSecret func(cipherText []byte, ctx value.Context, config encryptionconfig.ProviderConfig) ([]byte, error)

type transformTest struct {
	logger            kubeapiservertesting.Logger
	storageConfig     *storagebackend.Config
	configDir         string
	transformerConfig string
	kubeAPIServer     kubeapiservertesting.TestServer
	restClient        *kubernetes.Clientset
	ns                *corev1.Namespace
	secret            *corev1.Secret
}

func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML string) (*transformTest, error) {
	e := transformTest{
		logger:            l,
		transformerConfig: transformerConfigYAML,
		storageConfig:     framework.SharedEtcd(),
	}

	var err error
	if transformerConfigYAML != "" {
		if e.configDir, err = e.createEncryptionConfig(); err != nil {
			return nil, fmt.Errorf("error while creating KubeAPIServer encryption config: %v", err)
		}
	}

	if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, e.getEncryptionOptions(), e.storageConfig); err != nil {
		return nil, fmt.Errorf("failed to start KubeAPI server: %v", err)
	}

	if e.restClient, err = kubernetes.NewForConfig(e.kubeAPIServer.ClientConfig); err != nil {
		return nil, fmt.Errorf("error while creating rest client: %v", err)
	}

	if e.ns, err = e.createNamespace(testNamespace); err != nil {
		return nil, err
	}

	if e.secret, err = e.createSecret(testSecret, e.ns.Name); err != nil {
		return nil, err
	}

	return &e, nil
}

func (e *transformTest) cleanUp() {
	os.RemoveAll(e.configDir)
	e.restClient.CoreV1().Namespaces().Delete(e.ns.Name, metav1.NewDeleteOptions(0))
	e.kubeAPIServer.TearDownFn()
}

func (e *transformTest) run(unSealSecretFunc unSealSecret, expectedEnvelopePrefix string) {
	response, err := e.readRawRecordFromETCD(e.getETCDPath())
	if err != nil {
		e.logger.Errorf("failed to read from etcd: %v", err)
		return
	}

	if !bytes.HasPrefix(response.Kvs[0].Value, []byte(expectedEnvelopePrefix)) {
		e.logger.Errorf("expected secret to be prefixed with %s, but got %s",
			expectedEnvelopePrefix, response.Kvs[0].Value)
		return
	}

	// etcd path of the key is used as the authenticated context - need to pass it to decrypt
	ctx := value.DefaultContext([]byte(e.getETCDPath()))
	// Envelope header precedes the payload
	sealedData := response.Kvs[0].Value[len(expectedEnvelopePrefix):]
	transformerConfig, err := e.getEncryptionConfig()
	if err != nil {
		e.logger.Errorf("failed to parse transformer config: %v", err)
	}
	v, err := unSealSecretFunc(sealedData, ctx, *transformerConfig)
	if err != nil {
		e.logger.Errorf("failed to unseal secret: %v", err)
		return
	}
	if !strings.Contains(string(v), secretVal) {
		e.logger.Errorf("expected %q after decryption, but got %q", secretVal, string(v))
	}

	// Secrets should be un-enveloped on direct reads from Kube API Server.
	s, err := e.restClient.CoreV1().Secrets(testNamespace).Get(testSecret, metav1.GetOptions{})
	if secretVal != string(s.Data[secretKey]) {
		e.logger.Errorf("expected %s from KubeAPI, but got %s", secretVal, string(s.Data[secretKey]))
	}
}

func (e *transformTest) benchmark(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_, err := e.createSecret(e.secret.Name+strconv.Itoa(i), e.ns.Name)
		if err != nil {
			b.Fatalf("failed to create a secret: %v", err)
		}
	}
}

func (e *transformTest) getETCDPath() string {
	return fmt.Sprintf("/%s/secrets/%s/%s", e.storageConfig.Prefix, e.ns.Name, e.secret.Name)
}

func (e *transformTest) getRawSecretFromETCD() ([]byte, error) {
	secretETCDPath := e.getETCDPath()
	etcdResponse, err := e.readRawRecordFromETCD(secretETCDPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read %s from etcd: %v", secretETCDPath, err)
	}
	return etcdResponse.Kvs[0].Value, nil
}

func (e *transformTest) getEncryptionOptions() []string {
	if e.transformerConfig != "" {
		return []string{"--experimental-encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName)}
	}

	return nil
}

func (e *transformTest) createEncryptionConfig() (string, error) {
	tempDir, err := ioutil.TempDir("", "secrets-encryption-test")
	if err != nil {
		return "", fmt.Errorf("failed to create temp directory: %v", err)
	}

	encryptionConfig := path.Join(tempDir, encryptionConfigFileName)

	if err := ioutil.WriteFile(encryptionConfig, []byte(e.transformerConfig), 0644); err != nil {
		os.RemoveAll(tempDir)
		return "", fmt.Errorf("error while writing encryption config: %v", err)
	}

	return tempDir, nil
}

func (e *transformTest) getEncryptionConfig() (*encryptionconfig.ProviderConfig, error) {
	var config encryptionconfig.EncryptionConfig
	err := yaml.Unmarshal([]byte(e.transformerConfig), &config)
	if err != nil {
		return nil, fmt.Errorf("failed to extract transformer key: %v", err)
	}

	return &config.Resources[0].Providers[0], nil
}

func (e *transformTest) createNamespace(name string) (*corev1.Namespace, error) {
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}

	if _, err := e.restClient.CoreV1().Namespaces().Create(ns); err != nil {
		return nil, fmt.Errorf("unable to create testing namespace %v", err)
	}

	return ns, nil
}

func (e *transformTest) createSecret(name, namespace string) (*corev1.Secret, error) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Data: map[string][]byte{
			secretKey: []byte(secretVal),
		},
	}
	if _, err := e.restClient.CoreV1().Secrets(secret.Namespace).Create(secret); err != nil {
		return nil, fmt.Errorf("error while writing secret: %v", err)
	}

	return secret, nil
}

func (e *transformTest) readRawRecordFromETCD(path string) (*clientv3.GetResponse, error) {
	etcdClient, err := integration.GetEtcdKVClient(e.kubeAPIServer.ServerOpts.Etcd.StorageConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create etcd client: %v", err)
	}
	response, err := etcdClient.Get(context.Background(), path, clientv3.WithPrefix())
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve secret from etcd %v", err)
	}

	return response, nil
}
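For orientation, reading such a record straight out of etcd with the same clientv3 package — outside the test harness — looks roughly like this. The endpoint and key below are illustrative assumptions; the tests get their storage config from framework.SharedEtcd():

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Illustrative local endpoint; not the one the integration framework wires up.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Same key shape getETCDPath builds: /<storage prefix>/secrets/<namespace>/<name>.
	resp, err := cli.Get(context.Background(),
		"/registry/secrets/secret-encryption-test/test-secret", clientv3.WithPrefix())
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		// With encryption at rest enabled, the value starts with the provider prefix
		// (e.g. k8s:enc:aescbc:v1:key1:) instead of a plaintext-serialized object.
		fmt.Printf("%s -> %q\n", kv.Key, kv.Value)
	}
}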
3
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
@ -32,8 +32,7 @@ go_test(
        "main_test.go",
        "metrics_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/metrics",
    library = ":go_default_library",
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
        "main_test.go",
        "objectmeta_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/objectmeta",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
        "main_test.go",
        "openshift_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/openshift",
    tags = ["integration"],
    deps = [
        "//pkg/master:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/openshift/openshift_test.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/openshift/openshift_test.go
generated
vendored
@ -32,7 +32,6 @@ func TestMasterExportsSymbols(t *testing.T) {
		},
		ExtraConfig: master.ExtraConfig{
			EnableCoreControllers: false,
			EnableUISupport:       false,
			EnableLogsSupport:     false,
		},
	}
1
vendor/k8s.io/kubernetes/test/integration/pods/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/pods/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
        "main_test.go",
        "pods_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/pods",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/quota/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/quota/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
        "main_test.go",
        "quota_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/quota",
    tags = ["integration"],
    deps = [
        "//pkg/api/testapi:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/replicaset/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/replicaset/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
        "main_test.go",
        "replicaset_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/replicaset",
    tags = ["integration"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
16
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
16
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
@ -116,7 +116,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
// sets and pods are rsNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
	rsClient := clientSet.Extensions().ReplicaSets(namespace)
	rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(namespace)
	podClient := clientSet.CoreV1().Pods(namespace)
	pods, err := podClient.List(metav1.ListOptions{})
	if err != nil {
@ -200,7 +200,7 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R
	var createdRSs []*v1beta1.ReplicaSet
	var createdPods []*v1.Pod
	for _, rs := range rss {
		createdRS, err := clientSet.Extensions().ReplicaSets(rs.Namespace).Create(rs)
		createdRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Create(rs)
		if err != nil {
			t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
		}
@ -226,7 +226,7 @@ func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.Repli

// Update .Spec.Replicas to replicas and verify .Status.Replicas is changed accordingly
func scaleRS(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) {
	rsClient := c.Extensions().ReplicaSets(rs.Namespace)
	rsClient := c.ExtensionsV1beta1().ReplicaSets(rs.Namespace)
	rs = updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
		*rs.Spec.Replicas = replicas
	})
@ -361,7 +361,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1

func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) {
	ns := rs.Namespace
	rsClient := c.Extensions().ReplicaSets(ns)
	rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
	newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err)
@ -454,7 +454,7 @@ func TestAdoption(t *testing.T) {
		ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
		defer framework.DeleteTestingNamespace(ns, s, t)

		rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
		rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name)
		podClient := clientSet.CoreV1().Pods(ns.Name)
		const rsName = "rs"
		rs, err := rsClient.Create(newRS(rsName, ns.Name, 1))
@ -549,7 +549,7 @@ func TestSpecReplicasChange(t *testing.T) {

	// Add a template annotation change to test RS's status does update
	// without .Spec.Replicas change
	rsClient := c.Extensions().ReplicaSets(ns.Name)
	rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name)
	var oldGeneration int64
	newRS := updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
		oldGeneration = rs.Generation
@ -819,7 +819,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) {
	// by setting LastTransitionTime to more than 3600 seconds ago
	setPodsReadyCondition(t, c, thirdPodList, v1.ConditionTrue, time.Now().Add(-120*time.Minute))

	rsClient := c.Extensions().ReplicaSets(ns.Name)
	rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name)
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
		if err != nil {
@ -898,7 +898,7 @@ func TestFullyLabeledReplicas(t *testing.T) {
	waitRSStable(t, c, rs)

	// Change RS's template labels to have extra labels, but not its selector
	rsClient := c.Extensions().ReplicaSets(ns.Name)
	rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name)
	updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
		rs.Spec.Template.Labels = extraLabelMap
	})
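The recurring change in this file swaps the deprecated Extensions() group alias for the group-version-qualified ExtensionsV1beta1() accessor on the typed clientset. A minimal out-of-cluster sketch of the same call shape, assuming a default kubeconfig (the tests build their client config in-process instead):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig location; purely illustrative.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Group-version-qualified accessor replacing the deprecated Extensions() alias.
	rsList, err := cs.ExtensionsV1beta1().ReplicaSets("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d replica sets\n", len(rsList.Items))
}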
1
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
        "main_test.go",
        "replicationcontroller_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/replicationcontroller",
    tags = ["integration"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
2
vendor/k8s.io/kubernetes/test/integration/scale/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/test/integration/scale/BUILD
generated
vendored
@ -9,11 +9,11 @@ go_test(
    name = "go_default_test",
    size = "large",
    srcs = ["scale_test.go"],
    importpath = "k8s.io/kubernetes/test/integration/scale",
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
        "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go
generated
vendored
@ -22,6 +22,7 @@ import (
	"strings"
	"testing"

	_ "github.com/coreos/etcd/etcdserver/api/v3rpc" // Force package logger init.
	"github.com/coreos/pkg/capnslog"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
29
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
29
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
@ -19,31 +19,27 @@ go_test(
        "taint_test.go",
        "volume_binding_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/scheduler",
    library = ":go_default_library",
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//cmd/kube-scheduler/app:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller/node:go_default_library",
        "//pkg/controller/node/ipam:go_default_library",
        "//pkg/controller/nodelifecycle:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
        "//plugin/cmd/kube-scheduler/app:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//pkg/scheduler/schedulercache:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
        "//plugin/pkg/scheduler/api:go_default_library",
        "//plugin/pkg/scheduler/core:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
@ -57,6 +53,7 @@ go_test(
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
@ -89,9 +86,9 @@ go_library(
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/v1/pod:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",
        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
39
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go
generated
vendored
39
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go
generated
vendored
@ -38,18 +38,19 @@ import (
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/pkg/scheduler"
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	e2e "k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/integration/framework"
)

const (
	filter     = "filter"
	prioritize = "prioritize"
	bind       = "bind"
	filter               = "filter"
	prioritize           = "prioritize"
	bind                 = "bind"
	extendedResourceName = "foo.com/bar"
)

type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
@ -334,7 +335,7 @@ func TestSchedulerExtender(t *testing.T) {
			FilterVerb:     filter,
			PrioritizeVerb: prioritize,
			Weight:         3,
			EnableHttps:    false,
			EnableHTTPS:    false,
		},
		{
			URLPrefix: es2.URL,
@ -342,14 +343,20 @@ func TestSchedulerExtender(t *testing.T) {
			PrioritizeVerb: prioritize,
			BindVerb:       bind,
			Weight:         4,
			EnableHttps:    false,
			EnableHTTPS:    false,
			ManagedResources: []schedulerapi.ExtenderManagedResource{
				{
					Name:               extendedResourceName,
					IgnoredByScheduler: true,
				},
			},
		},
		{
			URLPrefix:        es3.URL,
			FilterVerb:       filter,
			PrioritizeVerb:   prioritize,
			Weight:           10,
			EnableHttps:      false,
			EnableHTTPS:      false,
			NodeCacheCapable: true,
		},
	},
@ -420,7 +427,17 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "extender-test-pod"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
			Containers: []v1.Container{
				{
					Name:  "container",
					Image: e2e.GetPauseImageName(cs),
					Resources: v1.ResourceRequirements{
						Limits: v1.ResourceList{
							extendedResourceName: *resource.NewQuantity(1, resource.DecimalSI),
						},
					},
				},
			},
		},
	}
21
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go
generated
vendored
21
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go
generated
vendored
@ -33,8 +33,7 @@ import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/features"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/core"
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
@ -42,14 +41,13 @@ import (

var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)

func waitForNominatedNodeAnnotation(cs clientset.Interface, pod *v1.Pod) error {
func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
	if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		annot, found := pod.Annotations[core.NominatedNodeAnnotationKey]
		if found && len(annot) > 0 {
		if len(pod.Status.NominatedNodeName) > 0 {
			return true, nil
		}
		return false, err
@ -276,7 +274,7 @@ func TestPreemption(t *testing.T) {
	}
	// Also check that the preemptor pod gets the annotation for nominated node name.
	if len(test.preemptedPodIndexes) > 0 {
		if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
		if err := waitForNominatedNodeName(cs, preemptor); err != nil {
			t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
		}
	}
@ -389,7 +387,7 @@ func TestPreemptionStarvation(t *testing.T) {
		t.Errorf("Error while creating the preempting pod: %v", err)
	}
	// Check that the preemptor pod gets the annotation for nominated node name.
	if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
	if err := waitForNominatedNodeName(cs, preemptor); err != nil {
		t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
	}
	// Make sure that preemptor is scheduled after preemptions.
@ -462,7 +460,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
		t.Errorf("Error while creating the medium priority pod: %v", err)
	}
	// Step 3. Check that nominated node name of the medium priority pod is set.
	if err := waitForNominatedNodeAnnotation(cs, medPriPod); err != nil {
	if err := waitForNominatedNodeName(cs, medPriPod); err != nil {
		t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
	}
	// Step 4. Create a high priority pod.
@ -480,7 +478,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
		t.Errorf("Error while creating the high priority pod: %v", err)
	}
	// Step 5. Check that nominated node name of the high priority pod is set.
	if err := waitForNominatedNodeAnnotation(cs, highPriPod); err != nil {
	if err := waitForNominatedNodeName(cs, highPriPod); err != nil {
		t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
	}
	// And the nominated node name of the medium priority pod is cleared.
@ -489,8 +487,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
	if err != nil {
		t.Errorf("Error getting the medium priority pod info: %v", err)
	}
	n, found := pod.Annotations[core.NominatedNodeAnnotationKey]
	if !found || len(n) == 0 {
	if len(pod.Status.NominatedNodeName) == 0 {
		return true, nil
	}
	return false, err
@ -755,7 +752,7 @@ func TestPDBInPreemption(t *testing.T) {
	}
	// Also check that the preemptor pod gets the annotation for nominated node name.
	if len(test.preemptedPodIndexes) > 0 {
		if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
		if err := waitForNominatedNodeName(cs, preemptor); err != nil {
			t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
		}
	}
182
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
182
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
@ -31,6 +31,7 @@ import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
@ -39,16 +40,16 @@ import (
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	schedulerapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
	"k8s.io/kubernetes/pkg/scheduler"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
	"k8s.io/kubernetes/test/integration/framework"
)

@ -97,69 +98,128 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
	factory.RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
	factory.RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)

	// Add a ConfigMap object.
	configPolicyName := "scheduler-custom-policy-config"
	policyConfigMap := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
		Data: map[string]string{
			componentconfig.SchedulerPolicyConfigMapKey: `{
				"kind" : "Policy",
				"apiVersion" : "v1",
				"predicates" : [
					{"name" : "PredicateOne"},
					{"name" : "PredicateTwo"}
				],
				"priorities" : [
					{"name" : "PriorityOne", "weight" : 1},
					{"name" : "PriorityTwo", "weight" : 5}
				]
	for i, test := range []struct {
		policy               string
		expectedPredicates   sets.String
		expectedPrioritizers sets.String
	}{
		{
			policy: `{
				"kind" : "Policy",
				"apiVersion" : "v1",
				"predicates" : [
					{"name" : "PredicateOne"},
					{"name" : "PredicateTwo"}
				],
				"priorities" : [
					{"name" : "PriorityOne", "weight" : 1},
					{"name" : "PriorityTwo", "weight" : 5}
				]
			}`,
			expectedPredicates: sets.NewString(
				"CheckNodeCondition", // mandatory predicate
				"PredicateOne",
				"PredicateTwo",
			),
			expectedPrioritizers: sets.NewString(
				"PriorityOne",
				"PriorityTwo",
			),
		},
	}
		{
			policy: `{
				"kind" : "Policy",
				"apiVersion" : "v1"
			}`,
			expectedPredicates: sets.NewString(
				"CheckNodeCondition", // mandatory predicate
				"CheckNodeDiskPressure",
				"CheckNodeMemoryPressure",
				"CheckVolumeBinding",
				"GeneralPredicates",
				"MatchInterPodAffinity",
				"MaxAzureDiskVolumeCount",
				"MaxEBSVolumeCount",
				"MaxGCEPDVolumeCount",
				"NoDiskConflict",
				"NoVolumeZoneConflict",
				"PodToleratesNodeTaints",
			),
			expectedPrioritizers: sets.NewString(
				"BalancedResourceAllocation",
				"InterPodAffinityPriority",
				"LeastRequestedPriority",
				"NodeAffinityPriority",
				"NodePreferAvoidPodsPriority",
				"SelectorSpreadPriority",
				"TaintTolerationPriority",
			),
		},
		{
			policy: `{
				"kind" : "Policy",
				"apiVersion" : "v1",
				"predicates" : [],
				"priorities" : []
			}`,
			expectedPredicates: sets.NewString(
				"CheckNodeCondition", // mandatory predicate
			),
			expectedPrioritizers: sets.NewString(),
		},
	} {
		// Add a ConfigMap object.
		configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
		policyConfigMap := v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
			Data:       map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
		}

		policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
		clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
		policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
		clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)

		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

		ss := &schedulerapp.SchedulerServer{
			SchedulerName: v1.DefaultSchedulerName,
			AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
				Policy: &componentconfig.SchedulerPolicySource{
					ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
						Namespace: policyConfigMap.Namespace,
						Name:      policyConfigMap.Name,
		ss := &schedulerapp.SchedulerServer{
			SchedulerName: v1.DefaultSchedulerName,
			AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
				Policy: &componentconfig.SchedulerPolicySource{
					ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
						Namespace: policyConfigMap.Namespace,
						Name:      policyConfigMap.Name,
					},
				},
			},
			HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
			Client:          clientSet,
			InformerFactory: informerFactory,
			PodInformer:     factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
			EventClient:     clientSet.CoreV1(),
			Recorder:        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
			Broadcaster:     eventBroadcaster,
		}
			HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
			Client:          clientSet,
			InformerFactory: informerFactory,
			PodInformer:     factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
			EventClient:     clientSet.CoreV1(),
			Recorder:        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
			Broadcaster:     eventBroadcaster,
		}

		config, err := ss.SchedulerConfig()
		if err != nil {
			t.Fatalf("couldn't make scheduler config: %v", err)
		}
		config, err := ss.SchedulerConfig()
		if err != nil {
			t.Fatalf("couldn't make scheduler config: %v", err)
		}

		// Verify that the config is applied correctly.
		schedPredicates := config.Algorithm.Predicates()
		schedPrioritizers := config.Algorithm.Prioritizers()
		// Includes one mandatory predicates.
		if len(schedPredicates) != 3 || len(schedPrioritizers) != 2 {
			t.Errorf("Unexpected number of predicates or priority functions. Number of predicates: %v, number of prioritizers: %v", len(schedPredicates), len(schedPrioritizers))
		}
		// Check a predicate and a priority function.
		if schedPredicates["PredicateTwo"] == nil {
			t.Errorf("Expected to have a PodFitsHostPorts predicate.")
		}
		if schedPrioritizers[1].Function == nil || schedPrioritizers[1].Weight != 5 {
			t.Errorf("Unexpected prioritizer: func: %v, weight: %v", schedPrioritizers[1].Function, schedPrioritizers[1].Weight)
		// Verify that the config is applied correctly.
		schedPredicates := sets.NewString()
		for k := range config.Algorithm.Predicates() {
			schedPredicates.Insert(k)
		}
		schedPrioritizers := sets.NewString()
		for _, p := range config.Algorithm.Prioritizers() {
			schedPrioritizers.Insert(p.Name)
		}
		if !schedPredicates.Equal(test.expectedPredicates) {
			t.Errorf("Expected predicates %v, got %v", test.expectedPredicates, schedPredicates)
		}
		if !schedPrioritizers.Equal(test.expectedPrioritizers) {
			t.Errorf("Expected priority functions %v, got %v", test.expectedPrioritizers, schedPrioritizers)
		}
	}
}
34
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go
generated
vendored
34
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go
generated
vendored
@ -37,22 +37,21 @@ import (
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	"k8s.io/kubernetes/pkg/controller/node"
	"k8s.io/kubernetes/pkg/controller/node/ipam"
	"k8s.io/kubernetes/pkg/controller/nodelifecycle"
	kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
	"k8s.io/kubernetes/pkg/scheduler"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
	pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
)

// TestTaintNodeByCondition verifies:
// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailabe
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func TestTaintNodeByCondition(t *testing.T) {
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
@ -85,29 +84,24 @@ func TestTaintNodeByCondition(t *testing.T) {
	controllerCh := make(chan struct{})
	defer close(controllerCh)

	// Start NodeController for taint.
	nc, err := node.NewNodeController(
	// Start NodeLifecycleController for taint.
	nc, err := nodelifecycle.NewNodeLifecycleController(
		informers.Core().V1().Pods(),
		informers.Core().V1().Nodes(),
		informers.Extensions().V1beta1().DaemonSets(),
		nil, // CloudProvider
		clientset,
		time.Second, // Node monitor grace period
		time.Second, // Node startup grace period
		time.Second, // Node monitor period
		time.Second, // Pod eviction timeout
		100,         // Eviction limiter QPS
		100,         // Secondary eviction limiter QPS
		100,         // Large cluster threshold
		100,         // Unhealthy zone threshold
		time.Second, // Node monitor grace period
		time.Second, // Node startup grace period
		time.Second, // Node monitor period
		nil,   // Cluster CIDR
		nil,   // Service CIDR
		0,     // Node CIDR mask size
		false, // Allocate node CIDRs
		ipam.RangeAllocatorType, // Allocator type
		true, // Run taint manger
		true, // Enabled taint based eviction
		true, // Enabled TaintNodeByCondition feature
		true, // Run taint manager
		true, // Use taint based evictions
		true, // Enabled TaintNodeByCondition feature
	)
	if err != nil {
		t.Errorf("Failed to create node controller: %v", err)
30
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
30
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
@ -35,9 +35,9 @@ import (
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/pkg/scheduler"
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"

	"net/http/httptest"
@ -362,6 +362,18 @@ func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
	return waitForPodToScheduleWithTimeout(cs, pod, 30*time.Second)
}

// waitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, podUnschedulable(cs, pod.Namespace, pod.Name))
}

// waitForPodUnschedulable waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the timeout duration (30 seconds).
func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
	return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}

// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
	return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
@ -381,15 +393,3 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
		}
	}
}

// printAllPods prints a list of all the pods and their node names. This is used
// for debugging.
func printAllPods(t *testing.T, cs clientset.Interface, nsName string) {
	podList, err := cs.CoreV1().Pods(nsName).List(metav1.ListOptions{})
	if err != nil {
		t.Logf("Error getting pods: %v", err)
	}
	for _, pod := range podList.Items {
		t.Logf("Pod:\n\tName:%v\n\tNamespace:%v\n\tNode Name:%v\n", pod.Name, pod.Namespace, pod.Spec.NodeName)
	}
}
297
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
297
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
@ -22,6 +22,8 @@ import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"

@ -31,16 +33,18 @@ import (
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)

@ -64,78 +68,103 @@ var (
)

const (
labelKey = "test-label"
labelValue = "test-value"
nodeName = "node1"
podLimit = 100
volsPerPod = 5
node1 = "node-1"
node2 = "node-2"
podLimit = 100
volsPerPod = 5
nodeAffinityLabelKey = "kubernetes.io/hostname"
)

func TestVolumeBinding(t *testing.T) {
config := setup(t, "volume-scheduling")
config := setupCluster(t, "volume-scheduling", 2)
defer config.teardown()

cases := map[string]struct {
pod *v1.Pod
pvs []*v1.PersistentVolume
pvcs []*v1.PersistentVolumeClaim
// Create these, but they should not be bound in the end
unboundPvcs []*v1.PersistentVolumeClaim
unboundPvs []*v1.PersistentVolume
shouldFail bool
}{
"immediate can bind": {
pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")},
},
"immediate cannot bind": {
pod: makePod("pod-i-cannotbind", config.ns, []string{"pvc-i-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-cannotbind", config.ns, &classImmediate, "")},
shouldFail: true,
},
"immediate pvc prebound": {
pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-prebound", config.ns, &classImmediate, "pv-i-pvc-prebound")},
},
"immediate pv prebound": {
pod: makePod("pod-i-pv-prebound", config.ns, []string{"pvc-i-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns)},
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-pv-prebound", config.ns, &classImmediate, "")},
},
"wait can bind": {
pod: makePod("pod-w-canbind", config.ns, []string{"pvc-w-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")},
},
"wait cannot bind": {
pod: makePod("pod-w-cannotbind", config.ns, []string{"pvc-w-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-cannotbind", config.ns, &classWait, "")},
shouldFail: true,
},
"wait pvc prebound": {
pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-prebound", config.ns, &classWait, "pv-w-pvc-prebound")},
},
"wait pv prebound": {
pod: makePod("pod-w-pv-prebound", config.ns, []string{"pvc-w-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns)},
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-pv-prebound", config.ns, &classWait, "")},
},
"wait can bind two": {
pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-2", classWait, "", ""),
makePV(t, "pv-w-canbind-3", classWait, "", ""),
makePV(t, "pv-w-canbind-2", classWait, "", "", node2),
makePV(t, "pv-w-canbind-3", classWait, "", "", node2),
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-2", config.ns, &classWait, ""),
makePVC("pvc-w-canbind-3", config.ns, &classWait, ""),
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-5", classWait, "", "", node1),
},
},
"wait cannot bind two": {
pod: makePod("pod-w-cannotbind-2", config.ns, []string{"pvc-w-cannotbind-1", "pvc-w-cannotbind-2"}),
unboundPvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-cannotbind-1", config.ns, &classWait, ""),
makePVC("pvc-w-cannotbind-2", config.ns, &classWait, ""),
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-cannotbind-1", classWait, "", "", node2),
makePV(t, "pv-w-cannotbind-2", classWait, "", "", node1),
},
shouldFail: true,
},
"mix immediate and wait": {
pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-4", classWait, "", ""),
makePV(t, "pv-i-canbind-2", classImmediate, "", ""),
makePV(t, "pv-w-canbind-4", classWait, "", "", node1),
makePV(t, "pv-i-canbind-2", classImmediate, "", "", node1),
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-4", config.ns, &classWait, ""),
makePVC("pvc-i-canbind-2", config.ns, &classImmediate, ""),
},
},
// TODO:
// immediate mode - PVC cannot bind
// wait mode - PVC cannot bind
// wait mode - 2 PVCs, 1 cannot bind
}

for name, test := range cases {
@ -148,28 +177,51 @@ func TestVolumeBinding(t *testing.T) {
}
}

for _, pv := range test.unboundPvs {
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}
}

// Create PVCs
for _, pvc := range test.pvcs {
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
}
for _, pvc := range test.unboundPvcs {
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
}

// Create Pod
if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil {
t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err)
}
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
if test.shouldFail {
if err := waitForPodUnschedulable(config.client, test.pod); err != nil {
t.Errorf("Pod %q was not unschedulable: %v", test.pod.Name, err)
}
} else {
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
}
}

// Validate PVC/PV binding
for _, pvc := range test.pvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
}
for _, pvc := range test.unboundPvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimPending)
}
for _, pv := range test.pvs {
validatePVPhase(t, config.client, pv, v1.VolumeBound)
}
for _, pv := range test.unboundPvs {
validatePVPhase(t, config.client, pv, v1.VolumeAvailable)
}

// TODO: validate events on Pods and PVCs

@ -181,14 +233,14 @@ func TestVolumeBinding(t *testing.T) {

// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
func TestVolumeBindingStress(t *testing.T) {
config := setup(t, "volume-binding-stress")
config := setupCluster(t, "volume-binding-stress", 1)
defer config.teardown()

// Create enough PVs and PVCs for all the pods
pvs := []*v1.PersistentVolume{}
pvcs := []*v1.PersistentVolumeClaim{}
for i := 0; i < podLimit*volsPerPod; i++ {
pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "")
pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
@ -235,7 +287,66 @@ func TestVolumeBindingStress(t *testing.T) {
// TODO: validate events on Pods and PVCs
}

func setup(t *testing.T, nsName string) *testConfig {
func TestPVAffinityConflict(t *testing.T) {
config := setupCluster(t, "volume-scheduling", 3)
defer config.teardown()

pv := makePV(t, "local-pv", classImmediate, "", "", node1)
pvc := makePVC("local-pvc", config.ns, &classImmediate, "")

// Create PV
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}

// Create PVC
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}

// Wait for PVC bound
if err := waitForPVCBound(config.client, pvc); err != nil {
t.Fatalf("PVC %q failed to bind: %v", pvc.Name, err)
}

nodeMarkers := []interface{}{
markNodeAffinity,
markNodeSelector,
}
for i := 0; i < len(nodeMarkers); i++ {
podName := "local-pod-" + strconv.Itoa(i+1)
pod := makePod(podName, config.ns, []string{"local-pvc"})
nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2")
// Create Pod
if _, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
}
// Give time to scheduler to attempt to schedule pod
if err := waitForPodUnschedulable(config.client, pod); err != nil {
t.Errorf("Failed as Pod %s was not unschedulable: %v", pod.Name, err)
}
// Check pod conditions
p, err := config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to access Pod %s status: %v", podName, err)
}
if strings.Compare(string(p.Status.Phase), "Pending") != 0 {
t.Fatalf("Failed as Pod %s was in: %s state and not in expected: Pending state", podName, p.Status.Phase)
}
if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 {
t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason)
}
if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") {
t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict. Got message %q", podName, p.Status.Conditions[0].Message)
}
// Deleting test pod
if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil {
t.Fatalf("Failed to delete Pod %s: %v", podName, err)
}
}
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
@ -251,6 +362,7 @@ func setup(t *testing.T, nsName string) *testConfig {

// Start master
masterConfig := framework.NewIntegrationTestMasterConfig()

_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
ns := framework.CreateTestingNamespace(nsName, s, t).Name

@ -266,6 +378,7 @@ func setup(t *testing.T, nsName string) *testConfig {
VolumeInformer: informers.Core().V1().PersistentVolumes(),
ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
ClassInformer: informers.Storage().V1().StorageClasses(),
PodInformer: informers.Core().V1().Pods(),
EventRecorder: nil, // TODO: add one so we can test PV events
EnableDynamicProvisioning: true,
}
@ -293,13 +406,16 @@ func setup(t *testing.T, nsName string) *testConfig {
true, // Enable EqualCache by default.
)

eventBroadcaster := record.NewBroadcaster()
sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.StopEverything = controllerCh
cfg.Recorder = &record.FakeRecorder{}
cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")})
})
if err != nil {
t.Fatalf("Failed to create scheduler: %v.", err)
}

go sched.Run()

// Waiting for all controller sync.
@ -307,35 +423,37 @@ func setup(t *testing.T, nsName string) *testConfig {
informers.WaitForCacheSync(controllerCh)

// Create shared objects
// Create node
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
Labels: map[string]string{labelKey: labelValue},
},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI),
// Create nodes
for i := 0; i < numberOfNodes; i++ {
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("node-%d", i+1),
Labels: map[string]string{nodeAffinityLabelKey: fmt.Sprintf("node-%d", i+1)},
},
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: fmt.Sprintf("schedulable condition"),
LastHeartbeatTime: metav1.Time{Time: time.Now()},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI),
},
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: fmt.Sprintf("schedulable condition"),
LastHeartbeatTime: metav1.Time{Time: time.Now()},
},
},
},
},
}
if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil {
t.Fatalf("Failed to create Node %q: %v", testNode.Name, err)
}
if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil {
t.Fatalf("Failed to create Node %q: %v", testNode.Name, err)
}
}

// Create SCs
scs := []*storagev1.StorageClass{
makeStorageClass(classWait, &modeWait),
makeStorageClass(classImmediate, &modeImmediate),
makeStorageClass(classWait, &modeWait),
}
for _, sc := range scs {
if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil {
@ -370,7 +488,7 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1
}
}

func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume {
func makePV(t *testing.T, name, scName, pvcName, ns, node string) *v1.PersistentVolume {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -389,6 +507,21 @@ func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume
Path: "/test-path",
},
},
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeAffinityLabelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{node},
},
},
},
},
},
},
},
}

@ -396,25 +529,6 @@ func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume
pv.Spec.ClaimRef = &v1.ObjectReference{Name: pvcName, Namespace: ns}
}

testNodeAffinity := &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: labelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{labelValue},
},
},
},
},
},
}
err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, testNodeAffinity)
if err != nil {
t.Fatalf("Setting storage node affinity failed: %v", err)
}
return pv
}

@ -461,7 +575,7 @@ func makePod(name, ns string, pvcs []string) *v1.Pod {
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Image: "k8s.gcr.io/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "while true; do sleep 1; done"},
},
@ -492,3 +606,44 @@ func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.Persistent
t.Errorf("PV %v phase not %v, got %v", pv.Name, phase, pv.Status.Phase)
}
}

func waitForPVCBound(client clientset.Interface, pvc *v1.PersistentVolumeClaim) error {
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if claim.Status.Phase == v1.ClaimBound {
return true, nil
}
return false, nil
})
}

func markNodeAffinity(pod *v1.Pod, node string) {
affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeAffinityLabelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{node},
},
},
},
},
},
},
}
pod.Spec.Affinity = affinity
}

func markNodeSelector(pod *v1.Pod, node string) {
ns := map[string]string{
nodeAffinityLabelKey: node,
}
pod.Spec.NodeSelector = ns
}
18
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
18
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
@ -14,19 +14,13 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//plugin/pkg/scheduler:go_default_library",
"//plugin/pkg/scheduler/algorithmprovider:go_default_library",
"//plugin/pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

@ -38,11 +32,11 @@ go_test(
"scheduler_bench_test.go",
"scheduler_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
library = ":go_default_library",
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//plugin/pkg/scheduler:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
104
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go
generated
vendored
104
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go
generated
vendored
@ -17,44 +17,98 @@ limitations under the License.
package benchmark

import (
"fmt"
"testing"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"

"github.com/golang/glog"
)

// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 0 scheduled pods
func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
benchmarkScheduling(100, 0, b)
// BenchmarkScheduling benchmarks the scheduling rate when the cluster has
// various quantities of nodes and scheduled pods.
func BenchmarkScheduling(b *testing.B) {
tests := []struct{ nodes, existingPods, minPods int }{
{nodes: 100, existingPods: 0, minPods: 100},
{nodes: 100, existingPods: 1000, minPods: 100},
{nodes: 1000, existingPods: 0, minPods: 100},
{nodes: 1000, existingPods: 1000, minPods: 100},
}
setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc1")
testStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc2")
for _, test := range tests {
name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
b.Run(name, func(b *testing.B) {
benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
})
}
}

// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 1000 scheduled pods
func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
benchmarkScheduling(100, 1000, b)
// BenchmarkSchedulingAntiAffinity benchmarks the scheduling rate of pods with
// PodAntiAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingAntiAffinity(b *testing.B) {
tests := []struct{ nodes, existingPods, minPods int }{
{nodes: 500, existingPods: 250, minPods: 250},
{nodes: 500, existingPods: 5000, minPods: 250},
}
// The setup strategy creates pods with no affinity rules.
setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
// The test strategy creates pods with anti-affinity for each other.
testBasePod := makeBasePodWithAntiAffinity(
map[string]string{"name": "test", "color": "green"},
map[string]string{"color": "green"})
testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
for _, test := range tests {
name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
b.Run(name, func(b *testing.B) {
benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
})
}

}

// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 0 scheduled pods
func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
benchmarkScheduling(1000, 0, b)
}

// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 1000 scheduled pods
func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
benchmarkScheduling(1000, 1000, b)
// makeBasePodWithAntiAffinity creates a Pod object to be used as a template.
// The Pod has a PodAntiAffinity requirement against pods with the given labels.
func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
basePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "affinity-pod-",
Labels: podLabels,
},
Spec: testutils.MakePodSpec(),
}
basePod.Spec.Affinity = &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affinityLabels,
},
TopologyKey: apis.LabelHostname,
},
},
},
}
return basePod
}

// benchmarkScheduling benchmarks scheduling rate with specific number of nodes
// and specific number of pods already scheduled. Since an operation takes relatively
// long time, b.N should be small: 10 - 100.
func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
// and specific number of pods already scheduled.
// This will schedule numExistingPods pods before the benchmark starts, and at
// least minPods pods during the benchmark.
func benchmarkScheduling(numNodes, numExistingPods, minPods int,
setupPodStrategy, testPodStrategy testutils.TestPodCreateStrategy,
b *testing.B) {
if b.N < minPods {
b.N = minPods
}
schedulerConfigFactory, finalFunc := mustSetupScheduler()
defer finalFunc()
c := schedulerConfigFactory.GetClient()
@ -70,7 +124,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
defer nodePreparer.CleanupNodes()

config := testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
config.AddStrategy("sched-test", numExistingPods, setupPodStrategy)
podCreator := testutils.NewTestPodCreator(c, config)
podCreator.CreatePods()

@ -79,7 +133,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
if err != nil {
glog.Fatalf("%v", err)
}
if len(scheduled) >= numScheduledPods {
if len(scheduled) >= numExistingPods {
break
}
time.Sleep(1 * time.Second)
@ -87,7 +141,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
// start benchmark
b.ResetTimer()
config = testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2"))
config.AddStrategy("sched-test", b.N, testPodStrategy)
podCreator = testutils.NewTestPodCreator(c, config)
podCreator.CreatePods()
for {
@ -97,7 +151,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
if err != nil {
glog.Fatalf("%v", err)
}
if len(scheduled) >= numScheduledPods+b.N {
if len(scheduled) >= numExistingPods+b.N {
break
}
// Note: This might introduce slight deviation in accuracy of benchmark results.
6
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler"
testutils "k8s.io/kubernetes/test/utils"
"math"
"strconv"
@ -144,7 +144,7 @@ func schedulePods(config *testConfig) int32 {
glog.Fatalf("%v", err)
}
// 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
// TODO Find out why sometimes there may be scheduling blips in the beggining.
// TODO Find out why sometimes there may be scheduling blips in the beginning.
if len(scheduled) > config.numPods/100 {
break
}
@ -258,7 +258,7 @@ func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testC
}

// writePodAndNodeTopologyToConfig reads a configuration and then applies it to a test configuration.
//TODO: As of now, this function is not doing anything expect for reading input values to priority structs.
//TODO: As of now, this function is not doing anything except for reading input values to priority structs.
func writePodAndNodeTopologyToConfig(config *testConfig) error {
// High Level structure that should be filled for every predicate or priority.
inputConfig := &schedulerPerfConfig{
77
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
77
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
@ -17,26 +17,15 @@ limitations under the License.
package benchmark

import (
"net/http"
"net/http/httptest"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/test/integration/util"
)

const enableEquivalenceCache = true

// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
@ -44,63 +33,19 @@ const enableEquivalenceCache = true
// remove resources after finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroyFunc func()) {

h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h)

func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) {
apiURL, apiShutdown := util.StartApiserver()
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()},
QPS: 5000.0,
Burst: 5000,
})
schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet, true)

informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

schedulerConfigurator = factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
conf.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
})
if err != nil {
glog.Fatalf("Error creating scheduler: %v", err)
shutdownFunc := func() {
schedulerShutdown()
apiShutdown()
}

stop := make(chan struct{})
informerFactory.Start(stop)

sched.Run()

destroyFunc = func() {
glog.Infof("destroying")
sched.StopEverything()
close(stop)
s.Close()
glog.Infof("destroyed")
}
return
return schedulerConfig, shutdownFunc
}
1
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"main_test.go",
"secrets_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/secrets",
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
1
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"main_test.go",
"service_account_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/serviceaccount",
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
8
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
8
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
@ -375,7 +375,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
})
serviceAccountKey, _ := rsa.GenerateKey(rand.Reader, 2048)
serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClientset)
serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]interface{}{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator(serviceaccount.LegacyIssuer, []interface{}{&serviceAccountKey.PublicKey}, serviceaccount.NewLegacyValidator(true, serviceAccountTokenGetter))
authenticator := union.New(
bearertoken.New(rootTokenAuth),
bearertoken.New(serviceAccountTokenAuth),
@ -425,8 +425,8 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie

masterConfig := framework.NewMasterConfig()
masterConfig.GenericConfig.EnableIndex = true
masterConfig.GenericConfig.Authenticator = authenticator
masterConfig.GenericConfig.Authorizer = authorizer
masterConfig.GenericConfig.Authentication.Authenticator = authenticator
masterConfig.GenericConfig.Authorization.Authorizer = authorizer
masterConfig.GenericConfig.AdmissionControl = serviceAccountAdmission
framework.RunAMasterUsingServer(masterConfig, apiServer, h)

@ -442,7 +442,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
informers.Core().V1().ServiceAccounts(),
informers.Core().V1().Secrets(),
rootClientset,
serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)},
serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, serviceAccountKey)},
)
if err != nil {
return rootClientset, clientConfig, stop, err
1
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"main_test.go",
"storage_classes_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/storageclasses",
tags = ["integration"],
deps = [
"//pkg/api/testapi:go_default_library",
35
vendor/k8s.io/kubernetes/test/integration/tls/BUILD
generated
vendored
Normal file
35
vendor/k8s.io/kubernetes/test/integration/tls/BUILD
generated
vendored
Normal file
@ -0,0 +1,35 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")

go_test(
name = "go_default_test",
size = "large",
srcs = [
"ciphers_test.go",
"main_test.go",
],
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-apiserver/app/options:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
169
vendor/k8s.io/kubernetes/test/integration/tls/ciphers_test.go
generated
vendored
Normal file
169
vendor/k8s.io/kubernetes/test/integration/tls/ciphers_test.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tls

import (
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"sync/atomic"
"testing"
"time"

"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/test/integration/framework"
)

func runBasicSecureAPIServer(t *testing.T, ciphers []string) (uint32, error) {
certDir, _ := ioutil.TempDir("", "test-integration-tls")
defer os.RemoveAll(certDir)
_, defaultServiceClusterIPRange, _ := net.ParseCIDR("10.0.0.0/24")
kubeClientConfigValue := atomic.Value{}
var kubePort uint32

go func() {
listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}

atomic.StoreUint32(&kubePort, uint32(port))

kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
kubeAPIServerOptions.SecureServing.BindPort = port
kubeAPIServerOptions.SecureServing.Listener = listener
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.SecureServing.CipherSuites = ciphers
kubeAPIServerOptions.InsecureServing.BindPort = 0
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange

tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig.ExtraConfig.EnableCoreControllers = false
kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)

kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)
if err != nil {
t.Fatal(err)
}

if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Log(err)
}
time.Sleep(100 * time.Millisecond)
}()

// Ensure server is ready
err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
obj := kubeClientConfigValue.Load()
if obj == nil {
return false, nil
}
kubeClientConfig := kubeClientConfigValue.Load().(*rest.Config)
kubeClientConfig.ContentType = ""
kubeClientConfig.AcceptContentTypes = ""
kubeClient, err := client.NewForConfig(kubeClientConfig)
if err != nil {
// this happens because we race the API server start
t.Log(err)
return false, nil
}
if _, err := kubeClient.Discovery().ServerVersion(); err != nil {
return false, nil
}
return true, nil
})
if err != nil {
return 0, err
}

securePort := atomic.LoadUint32(&kubePort)
return securePort, nil
}

func TestAPICiphers(t *testing.T) {

basicServerCiphers := []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"}

kubePort, err := runBasicSecureAPIServer(t, basicServerCiphers)
if err != nil {
t.Fatal(err)
}

tests := []struct {
clientCiphers []uint16
expectedError bool
}{
{
// Not supported cipher
clientCiphers: []uint16{tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA},
expectedError: true,
},
{
// Supported cipher
clientCiphers: []uint16{tls.TLS_RSA_WITH_AES_256_CBC_SHA},
expectedError: false,
},
}

for i, test := range tests {
runTestAPICiphers(t, i, kubePort, test.clientCiphers, test.expectedError)
}
}

func runTestAPICiphers(t *testing.T, testID int, kubePort uint32, clientCiphers []uint16, expectedError bool) {

tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
CipherSuites: clientCiphers,
},
}
client := &http.Client{Transport: tr}
req, err := http.NewRequest("GET", fmt.Sprintf("https://127.0.0.1:%d", kubePort), nil)
if err != nil {
t.Fatal(err)
}
resp, err := client.Do(req)

if expectedError == true && err == nil {
t.Fatalf("%d: expecting error for cipher test, client cipher is supported and it shouldn't be", testID)
} else if err != nil && expectedError == false {
t.Fatalf("%d: not expecting error by client with cipher failed: %+v", testID, err)
}

if err == nil {
defer resp.Body.Close()
}
}
27
vendor/k8s.io/kubernetes/test/integration/tls/main_test.go
generated
vendored
Normal file
27
vendor/k8s.io/kubernetes/test/integration/tls/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tls

import (
"testing"

"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}
1
vendor/k8s.io/kubernetes/test/integration/ttlcontroller/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/ttlcontroller/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_test(
"main_test.go",
"ttlcontroller_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/ttlcontroller",
tags = [
"etcd",
"integration",
39
vendor/k8s.io/kubernetes/test/integration/util/BUILD
generated
vendored
Normal file
39
vendor/k8s.io/kubernetes/test/integration/util/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
name = "go_default_library",
srcs = [
"util.go",
],
importpath = "k8s.io/kubernetes/test/integration/util",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
103
vendor/k8s.io/kubernetes/test/integration/util/util.go
generated
vendored
Normal file
103
vendor/k8s.io/kubernetes/test/integration/util/util.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"net/http"
"net/http/httptest"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)

// ShutdownFunc represents the function handle to be called, typically in a defer handler, to shutdown a running module
type ShutdownFunc func()

// StartApiserver starts a local API server for testing and returns the handle to the URL and the shutdown function to stop it.
func StartApiserver() (string, ShutdownFunc) {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h)

shutdownFunc := func() {
glog.Infof("destroying API server")
s.Close()
glog.Infof("destroyed API server")
}
return s.URL, shutdownFunc
}

// StartScheduler configures and starts a scheduler given a handle to the clientSet interface
// and event broadcaster. It returns a handle to the configurator for the running scheduler
// and the shutdown function to stop it.
func StartScheduler(clientSet clientset.Interface, enableEquivalenceCache bool) (scheduler.Configurator, ShutdownFunc) {
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

evtBroadcaster := record.NewBroadcaster()
evtWatch := evtBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

schedulerConfigurator := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)

sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
conf.Recorder = evtBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
})
if err != nil {
glog.Fatalf("Error creating scheduler: %v", err)
}

stop := make(chan struct{})
informerFactory.Start(stop)

sched.Run()

shutdownFunc := func() {
glog.Infof("destroying scheduler")
evtWatch.Stop()
sched.StopEverything()
close(stop)
glog.Infof("destroyed scheduler")
}
return schedulerConfigurator, shutdownFunc
}
29
vendor/k8s.io/kubernetes/test/integration/utils.go
generated
vendored
29
vendor/k8s.io/kubernetes/test/integration/utils.go
generated
vendored
@ -23,8 +23,12 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/storagebackend"
clientset "k8s.io/client-go/kubernetes"
coreclient "k8s.io/client-go/kubernetes/typed/core/v1"

"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/transport"
)

func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
@ -62,3 +66,28 @@ func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, in
}
})
}

func GetEtcdKVClient(config storagebackend.Config) (clientv3.KV, error) {
tlsInfo := transport.TLSInfo{
CertFile: config.CertFile,
KeyFile: config.KeyFile,
CAFile: config.CAFile,
}

tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}

cfg := clientv3.Config{
Endpoints: config.ServerList,
TLS: tlsConfig,
}

c, err := clientv3.New(cfg)
if err != nil {
return nil, err
}

return clientv3.NewKV(c), nil
}
3
vendor/k8s.io/kubernetes/test/integration/volume/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/test/integration/volume/BUILD
generated
vendored
@ -13,7 +13,6 @@ go_test(
"main_test.go",
"persistent_volumes_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/volume",
tags = ["integration"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
@ -24,7 +23,7 @@ go_test(
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/volume/util:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
12
vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go
generated
vendored
12
vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go
generated
vendored
@ -34,7 +34,7 @@ import (
volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/integration/framework"
)

@ -86,7 +86,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "node-sandbox",
Annotations: map[string]string{
volumehelper.ControllerManagedAttachAnnotation: "true",
util.ControllerManagedAttachAnnotation: "true",
},
},
}
@ -152,7 +152,7 @@ func TestPodUpdateWithWithADC(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "node-sandbox",
Annotations: map[string]string{
volumehelper.ControllerManagedAttachAnnotation: "true",
util.ControllerManagedAttachAnnotation: "true",
},
},
}
@ -219,8 +219,8 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "node-sandbox",
Annotations: map[string]string{
volumehelper.ControllerManagedAttachAnnotation: "true",
volumehelper.KeepTerminatedPodVolumesAnnotation: "true",
util.ControllerManagedAttachAnnotation: "true",
util.KeepTerminatedPodVolumesAnnotation: "true",
},
},
}
@ -383,7 +383,7 @@ func TestPodAddedByDswp(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "node-sandbox",
Annotations: map[string]string{
volumehelper.ControllerManagedAttachAnnotation: "true",
util.ControllerManagedAttachAnnotation: "true",
},
},
}
1
vendor/k8s.io/kubernetes/test/integration/volume/persistent_volumes_test.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/integration/volume/persistent_volumes_test.go
generated
vendored
@ -1135,6 +1135,7 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
VolumeInformer: informers.Core().V1().PersistentVolumes(),
ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
ClassInformer: informers.Storage().V1().StorageClasses(),
PodInformer: informers.Core().V1().Pods(),
EnableDynamicProvisioning: true,
})
if err != nil {